in_source_id (string) | before_files (list) | after_files (list) | pr_diff (string) | issue (string)
---|---|---|---|---
numpy__numpy-12439 | [
{
"content": "from __future__ import division, print_function\n\nimport os\nimport sys\nimport pickle\nimport copy\nimport warnings\nimport platform\nfrom os.path import join\nfrom numpy.distutils import log\nfrom distutils.dep_util import newer\nfrom distutils.sysconfig import get_config_var\nfrom numpy._build_utils.apple_accelerate import (\n uses_accelerate_framework, get_sgemv_fix\n )\nfrom numpy.compat import npy_load_module\nfrom setup_common import *\n\n# Set to True to enable relaxed strides checking. This (mostly) means\n# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags.\nNPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', \"1\") != \"0\")\n\n# Put NPY_RELAXED_STRIDES_DEBUG=1 in the environment if you want numpy to use a\n# bogus value for affected strides in order to help smoke out bad stride usage\n# when relaxed stride checking is enabled.\nNPY_RELAXED_STRIDES_DEBUG = (os.environ.get('NPY_RELAXED_STRIDES_DEBUG', \"0\") != \"0\")\nNPY_RELAXED_STRIDES_DEBUG = NPY_RELAXED_STRIDES_DEBUG and NPY_RELAXED_STRIDES_CHECKING\n\n# XXX: ugly, we use a class to avoid calling twice some expensive functions in\n# config.h/numpyconfig.h. I don't see a better way because distutils force\n# config.h generation inside an Extension class, and as such sharing\n# configuration information between extensions is not easy.\n# Using a pickled-based memoize does not work because config_cmd is an instance\n# method, which cPickle does not like.\n#\n# Use pickle in all cases, as cPickle is gone in python3 and the difference\n# in time is only in build. -- Charles Harris, 2013-03-30\n\nclass CallOnceOnly(object):\n def __init__(self):\n self._check_types = None\n self._check_ieee_macros = None\n self._check_complex = None\n\n def check_types(self, *a, **kw):\n if self._check_types is None:\n out = check_types(*a, **kw)\n self._check_types = pickle.dumps(out)\n else:\n out = copy.deepcopy(pickle.loads(self._check_types))\n return out\n\n def check_ieee_macros(self, *a, **kw):\n if self._check_ieee_macros is None:\n out = check_ieee_macros(*a, **kw)\n self._check_ieee_macros = pickle.dumps(out)\n else:\n out = copy.deepcopy(pickle.loads(self._check_ieee_macros))\n return out\n\n def check_complex(self, *a, **kw):\n if self._check_complex is None:\n out = check_complex(*a, **kw)\n self._check_complex = pickle.dumps(out)\n else:\n out = copy.deepcopy(pickle.loads(self._check_complex))\n return out\n\ndef pythonlib_dir():\n \"\"\"return path where libpython* is.\"\"\"\n if sys.platform == 'win32':\n return os.path.join(sys.prefix, \"libs\")\n else:\n return get_config_var('LIBDIR')\n\ndef is_npy_no_signal():\n \"\"\"Return True if the NPY_NO_SIGNAL symbol must be defined in configuration\n header.\"\"\"\n return sys.platform == 'win32'\n\ndef is_npy_no_smp():\n \"\"\"Return True if the NPY_NO_SMP symbol must be defined in public\n header (when SMP support cannot be reliably enabled).\"\"\"\n # Perhaps a fancier check is in order here.\n # so that threads are only enabled if there\n # are actually multiple CPUS? 
-- but\n # threaded code can be nice even on a single\n # CPU so that long-calculating code doesn't\n # block.\n return 'NPY_NOSMP' in os.environ\n\ndef win32_checks(deflist):\n from numpy.distutils.misc_util import get_build_architecture\n a = get_build_architecture()\n\n # Distutils hack on AMD64 on windows\n print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' %\n (a, os.name, sys.platform))\n if a == 'AMD64':\n deflist.append('DISTUTILS_USE_SDK')\n\n # On win32, force long double format string to be 'g', not\n # 'Lg', since the MS runtime does not support long double whose\n # size is > sizeof(double)\n if a == \"Intel\" or a == \"AMD64\":\n deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING')\n\ndef check_math_capabilities(config, moredefs, mathlibs):\n def check_func(func_name):\n return config.check_func(func_name, libraries=mathlibs,\n decl=True, call=True)\n\n def check_funcs_once(funcs_name):\n decl = dict([(f, True) for f in funcs_name])\n st = config.check_funcs_once(funcs_name, libraries=mathlibs,\n decl=decl, call=decl)\n if st:\n moredefs.extend([(fname2def(f), 1) for f in funcs_name])\n return st\n\n def check_funcs(funcs_name):\n # Use check_funcs_once first, and if it does not work, test func per\n # func. Return success only if all the functions are available\n if not check_funcs_once(funcs_name):\n # Global check failed, check func per func\n for f in funcs_name:\n if check_func(f):\n moredefs.append((fname2def(f), 1))\n return 0\n else:\n return 1\n\n #use_msvc = config.check_decl(\"_MSC_VER\")\n\n if not check_funcs_once(MANDATORY_FUNCS):\n raise SystemError(\"One of the required function to build numpy is not\"\n \" available (the list is %s).\" % str(MANDATORY_FUNCS))\n\n # Standard functions which may not be available and for which we have a\n # replacement implementation. Note that some of these are C99 functions.\n\n # XXX: hack to circumvent cpp pollution from python: python put its\n # config.h in the public namespace, so we have a clash for the common\n # functions we test. We remove every function tested by python's\n # autoconf, hoping their own test are correct\n for f in OPTIONAL_STDFUNCS_MAYBE:\n if config.check_decl(fname2def(f),\n headers=[\"Python.h\", \"math.h\"]):\n OPTIONAL_STDFUNCS.remove(f)\n\n check_funcs(OPTIONAL_STDFUNCS)\n\n for h in OPTIONAL_HEADERS:\n if config.check_func(\"\", decl=False, call=False, headers=[h]):\n h = h.replace(\".\", \"_\").replace(os.path.sep, \"_\")\n moredefs.append((fname2def(h), 1))\n\n for tup in OPTIONAL_INTRINSICS:\n headers = None\n if len(tup) == 2:\n f, args, m = tup[0], tup[1], fname2def(tup[0])\n elif len(tup) == 3:\n f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[0])\n else:\n f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[3])\n if config.check_func(f, decl=False, call=True, call_args=args,\n headers=headers):\n moredefs.append((m, 1))\n\n for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES:\n if config.check_gcc_function_attribute(dec, fn):\n moredefs.append((fname2def(fn), 1))\n\n for fn in OPTIONAL_VARIABLE_ATTRIBUTES:\n if config.check_gcc_variable_attribute(fn):\n m = fn.replace(\"(\", \"_\").replace(\")\", \"_\")\n moredefs.append((fname2def(m), 1))\n\n # C99 functions: float and long double versions\n check_funcs(C99_FUNCS_SINGLE)\n check_funcs(C99_FUNCS_EXTENDED)\n\ndef check_complex(config, mathlibs):\n priv = []\n pub = []\n\n try:\n if os.uname()[0] == \"Interix\":\n warnings.warn(\"Disabling broken complex support. 
See #1365\", stacklevel=2)\n return priv, pub\n except Exception:\n # os.uname not available on all platforms. blanket except ugly but safe\n pass\n\n # Check for complex support\n st = config.check_header('complex.h')\n if st:\n priv.append(('HAVE_COMPLEX_H', 1))\n pub.append(('NPY_USE_C99_COMPLEX', 1))\n\n for t in C99_COMPLEX_TYPES:\n st = config.check_type(t, headers=[\"complex.h\"])\n if st:\n pub.append(('NPY_HAVE_%s' % type2def(t), 1))\n\n def check_prec(prec):\n flist = [f + prec for f in C99_COMPLEX_FUNCS]\n decl = dict([(f, True) for f in flist])\n if not config.check_funcs_once(flist, call=decl, decl=decl,\n libraries=mathlibs):\n for f in flist:\n if config.check_func(f, call=True, decl=True,\n libraries=mathlibs):\n priv.append((fname2def(f), 1))\n else:\n priv.extend([(fname2def(f), 1) for f in flist])\n\n check_prec('')\n check_prec('f')\n check_prec('l')\n\n return priv, pub\n\ndef check_ieee_macros(config):\n priv = []\n pub = []\n\n macros = []\n\n def _add_decl(f):\n priv.append(fname2def(\"decl_%s\" % f))\n pub.append('NPY_%s' % fname2def(\"decl_%s\" % f))\n\n # XXX: hack to circumvent cpp pollution from python: python put its\n # config.h in the public namespace, so we have a clash for the common\n # functions we test. We remove every function tested by python's\n # autoconf, hoping their own test are correct\n _macros = [\"isnan\", \"isinf\", \"signbit\", \"isfinite\"]\n for f in _macros:\n py_symbol = fname2def(\"decl_%s\" % f)\n already_declared = config.check_decl(py_symbol,\n headers=[\"Python.h\", \"math.h\"])\n if already_declared:\n if config.check_macro_true(py_symbol,\n headers=[\"Python.h\", \"math.h\"]):\n pub.append('NPY_%s' % fname2def(\"decl_%s\" % f))\n else:\n macros.append(f)\n # Normally, isnan and isinf are macro (C99), but some platforms only have\n # func, or both func and macro version. Check for macro only, and define\n # replacement ones if not found.\n # Note: including Python.h is necessary because it modifies some math.h\n # definitions\n for f in macros:\n st = config.check_decl(f, headers=[\"Python.h\", \"math.h\"])\n if st:\n _add_decl(f)\n\n return priv, pub\n\ndef check_types(config_cmd, ext, build_dir):\n private_defines = []\n public_defines = []\n\n # Expected size (in number of bytes) for each type. This is an\n # optimization: those are only hints, and an exhaustive search for the size\n # is done if the hints are wrong.\n expected = {'short': [2], 'int': [4], 'long': [8, 4],\n 'float': [4], 'double': [8], 'long double': [16, 12, 8],\n 'Py_intptr_t': [8, 4], 'PY_LONG_LONG': [8], 'long long': [8],\n 'off_t': [8, 4]}\n\n # Check we have the python header (-dev* packages on Linux)\n result = config_cmd.check_header('Python.h')\n if not result:\n python = 'python'\n if '__pypy__' in sys.builtin_module_names:\n python = 'pypy'\n raise SystemError(\n \"Cannot compile 'Python.h'. 
Perhaps you need to \"\n \"install {0}-dev|{0}-devel.\".format(python))\n res = config_cmd.check_header(\"endian.h\")\n if res:\n private_defines.append(('HAVE_ENDIAN_H', 1))\n public_defines.append(('NPY_HAVE_ENDIAN_H', 1))\n res = config_cmd.check_header(\"sys/endian.h\")\n if res:\n private_defines.append(('HAVE_SYS_ENDIAN_H', 1))\n public_defines.append(('NPY_HAVE_SYS_ENDIAN_H', 1))\n\n # Check basic types sizes\n for type in ('short', 'int', 'long'):\n res = config_cmd.check_decl(\"SIZEOF_%s\" % sym2def(type), headers=[\"Python.h\"])\n if res:\n public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), \"SIZEOF_%s\" % sym2def(type)))\n else:\n res = config_cmd.check_type_size(type, expected=expected[type])\n if res >= 0:\n public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % type)\n\n for type in ('float', 'double', 'long double'):\n already_declared = config_cmd.check_decl(\"SIZEOF_%s\" % sym2def(type),\n headers=[\"Python.h\"])\n res = config_cmd.check_type_size(type, expected=expected[type])\n if res >= 0:\n public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))\n if not already_declared and not type == 'long double':\n private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % type)\n\n # Compute size of corresponding complex type: used to check that our\n # definition is binary compatible with C99 complex type (check done at\n # build time in npy_common.h)\n complex_def = \"struct {%s __x; %s __y;}\" % (type, type)\n res = config_cmd.check_type_size(complex_def,\n expected=[2 * x for x in expected[type]])\n if res >= 0:\n public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % complex_def)\n\n for type in ('Py_intptr_t', 'off_t'):\n res = config_cmd.check_type_size(type, headers=[\"Python.h\"],\n library_dirs=[pythonlib_dir()],\n expected=expected[type])\n\n if res >= 0:\n private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))\n public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % type)\n\n # We check declaration AND type because that's how distutils does it.\n if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']):\n res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'],\n library_dirs=[pythonlib_dir()],\n expected=expected['PY_LONG_LONG'])\n if res >= 0:\n private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))\n public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % 'PY_LONG_LONG')\n\n res = config_cmd.check_type_size('long long',\n expected=expected['long long'])\n if res >= 0:\n #private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res))\n public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % 'long long')\n\n if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']):\n raise RuntimeError(\n \"Config wo CHAR_BIT is not supported\"\n \", please contact the maintainers\")\n\n return private_defines, public_defines\n\ndef check_mathlib(config_cmd):\n # Testing the C math library\n mathlibs = []\n mathlibs_choices = [[], ['m'], ['cpml']]\n mathlib = os.environ.get('MATHLIB')\n if mathlib:\n 
mathlibs_choices.insert(0, mathlib.split(','))\n for libs in mathlibs_choices:\n if config_cmd.check_func(\"exp\", libraries=libs, decl=True, call=True):\n mathlibs = libs\n break\n else:\n raise EnvironmentError(\"math library missing; rerun \"\n \"setup.py after setting the \"\n \"MATHLIB env variable\")\n return mathlibs\n\ndef visibility_define(config):\n \"\"\"Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty\n string).\"\"\"\n if config.check_compiler_gcc4():\n return '__attribute__((visibility(\"hidden\")))'\n else:\n return ''\n\ndef configuration(parent_package='',top_path=None):\n from numpy.distutils.misc_util import Configuration, dot_join\n from numpy.distutils.system_info import get_info\n\n config = Configuration('core', parent_package, top_path)\n local_dir = config.local_path\n codegen_dir = join(local_dir, 'code_generators')\n\n if is_released(config):\n warnings.simplefilter('error', MismatchCAPIWarning)\n\n # Check whether we have a mismatch between the set C API VERSION and the\n # actual C API VERSION\n check_api_version(C_API_VERSION, codegen_dir)\n\n generate_umath_py = join(codegen_dir, 'generate_umath.py')\n n = dot_join(config.name, 'generate_umath')\n generate_umath = npy_load_module('_'.join(n.split('.')),\n generate_umath_py, ('.py', 'U', 1))\n\n header_dir = 'include/numpy' # this is relative to config.path_in_package\n\n cocache = CallOnceOnly()\n\n def generate_config_h(ext, build_dir):\n target = join(build_dir, header_dir, 'config.h')\n d = os.path.dirname(target)\n if not os.path.exists(d):\n os.makedirs(d)\n\n if newer(__file__, target):\n config_cmd = config.get_config_cmd()\n log.info('Generating %s', target)\n\n # Check sizeof\n moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)\n\n # Check math library and C99 math funcs availability\n mathlibs = check_mathlib(config_cmd)\n moredefs.append(('MATHLIB', ','.join(mathlibs)))\n\n check_math_capabilities(config_cmd, moredefs, mathlibs)\n moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])\n moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])\n\n # Signal check\n if is_npy_no_signal():\n moredefs.append('__NPY_PRIVATE_NO_SIGNAL')\n\n # Windows checks\n if sys.platform == 'win32' or os.name == 'nt':\n win32_checks(moredefs)\n\n # C99 restrict keyword\n moredefs.append(('NPY_RESTRICT', config_cmd.check_restrict()))\n\n # Inline check\n inline = config_cmd.check_inline()\n\n # Use relaxed stride checking\n if NPY_RELAXED_STRIDES_CHECKING:\n moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))\n\n # Use bogus stride debug aid when relaxed strides are enabled\n if NPY_RELAXED_STRIDES_DEBUG:\n moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1))\n\n # Get long double representation\n rep = check_long_double_representation(config_cmd)\n moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))\n\n # Py3K check\n if sys.version_info[0] == 3:\n moredefs.append(('NPY_PY3K', 1))\n\n # Generate the config.h file from moredefs\n target_f = open(target, 'w')\n for d in moredefs:\n if isinstance(d, str):\n target_f.write('#define %s\\n' % (d))\n else:\n target_f.write('#define %s %s\\n' % (d[0], d[1]))\n\n # define inline to our keyword, or nothing\n target_f.write('#ifndef __cplusplus\\n')\n if inline == 'inline':\n target_f.write('/* #undef inline */\\n')\n else:\n target_f.write('#define inline %s\\n' % inline)\n target_f.write('#endif\\n')\n\n # add the guard to make sure config.h is never included directly,\n # but always through npy_config.h\n target_f.write(\"\"\"\n#ifndef 
_NPY_NPY_CONFIG_H_\n#error config.h should never be included directly, include npy_config.h instead\n#endif\n\"\"\")\n\n target_f.close()\n print('File:', target)\n target_f = open(target)\n print(target_f.read())\n target_f.close()\n print('EOF')\n else:\n mathlibs = []\n target_f = open(target)\n for line in target_f:\n s = '#define MATHLIB'\n if line.startswith(s):\n value = line[len(s):].strip()\n if value:\n mathlibs.extend(value.split(','))\n target_f.close()\n\n # Ugly: this can be called within a library and not an extension,\n # in which case there is no libraries attributes (and none is\n # needed).\n if hasattr(ext, 'libraries'):\n ext.libraries.extend(mathlibs)\n\n incl_dir = os.path.dirname(target)\n if incl_dir not in config.numpy_include_dirs:\n config.numpy_include_dirs.append(incl_dir)\n\n return target\n\n def generate_numpyconfig_h(ext, build_dir):\n \"\"\"Depends on config.h: generate_config_h has to be called before !\"\"\"\n # put common include directory in build_dir on search path\n # allows using code generation in headers headers\n config.add_include_dirs(join(build_dir, \"src\", \"common\"))\n config.add_include_dirs(join(build_dir, \"src\", \"npymath\"))\n\n target = join(build_dir, header_dir, '_numpyconfig.h')\n d = os.path.dirname(target)\n if not os.path.exists(d):\n os.makedirs(d)\n if newer(__file__, target):\n config_cmd = config.get_config_cmd()\n log.info('Generating %s', target)\n\n # Check sizeof\n ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)\n\n if is_npy_no_signal():\n moredefs.append(('NPY_NO_SIGNAL', 1))\n\n if is_npy_no_smp():\n moredefs.append(('NPY_NO_SMP', 1))\n else:\n moredefs.append(('NPY_NO_SMP', 0))\n\n mathlibs = check_mathlib(config_cmd)\n moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])\n moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])\n\n if NPY_RELAXED_STRIDES_CHECKING:\n moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))\n\n if NPY_RELAXED_STRIDES_DEBUG:\n moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1))\n\n # Check whether we can use inttypes (C99) formats\n if config_cmd.check_decl('PRIdPTR', headers=['inttypes.h']):\n moredefs.append(('NPY_USE_C99_FORMATS', 1))\n\n # visibility check\n hidden_visibility = visibility_define(config_cmd)\n moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility))\n\n # Add the C API/ABI versions\n moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))\n moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))\n\n # Add moredefs to header\n target_f = open(target, 'w')\n for d in moredefs:\n if isinstance(d, str):\n target_f.write('#define %s\\n' % (d))\n else:\n target_f.write('#define %s %s\\n' % (d[0], d[1]))\n\n # Define __STDC_FORMAT_MACROS\n target_f.write(\"\"\"\n#ifndef __STDC_FORMAT_MACROS\n#define __STDC_FORMAT_MACROS 1\n#endif\n\"\"\")\n target_f.close()\n\n # Dump the numpyconfig.h header to stdout\n print('File: %s' % target)\n target_f = open(target)\n print(target_f.read())\n target_f.close()\n print('EOF')\n config.add_data_files((header_dir, target))\n return target\n\n def generate_api_func(module_name):\n def generate_api(ext, build_dir):\n script = join(codegen_dir, module_name + '.py')\n sys.path.insert(0, codegen_dir)\n try:\n m = __import__(module_name)\n log.info('executing %s', script)\n h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir))\n finally:\n del sys.path[0]\n config.add_data_files((header_dir, h_file),\n (header_dir, doc_file))\n return (h_file,)\n return generate_api\n\n 
generate_numpy_api = generate_api_func('generate_numpy_api')\n generate_ufunc_api = generate_api_func('generate_ufunc_api')\n\n config.add_include_dirs(join(local_dir, \"src\", \"common\"))\n config.add_include_dirs(join(local_dir, \"src\"))\n config.add_include_dirs(join(local_dir))\n\n config.add_data_files('include/numpy/*.h')\n config.add_include_dirs(join('src', 'npymath'))\n config.add_include_dirs(join('src', 'multiarray'))\n config.add_include_dirs(join('src', 'umath'))\n config.add_include_dirs(join('src', 'npysort'))\n\n config.add_define_macros([(\"NPY_INTERNAL_BUILD\", \"1\")]) # this macro indicates that Numpy build is in process\n config.add_define_macros([(\"HAVE_NPY_CONFIG_H\", \"1\")])\n if sys.platform[:3] == \"aix\":\n config.add_define_macros([(\"_LARGE_FILES\", None)])\n else:\n config.add_define_macros([(\"_FILE_OFFSET_BITS\", \"64\")])\n config.add_define_macros([('_LARGEFILE_SOURCE', '1')])\n config.add_define_macros([('_LARGEFILE64_SOURCE', '1')])\n\n config.numpy_include_dirs.extend(config.paths('include'))\n\n deps = [join('src', 'npymath', '_signbit.c'),\n join('include', 'numpy', '*object.h'),\n join(codegen_dir, 'genapi.py'),\n ]\n\n #######################################################################\n # dummy module #\n #######################################################################\n\n # npymath needs the config.h and numpyconfig.h files to be generated, but\n # build_clib cannot handle generate_config_h and generate_numpyconfig_h\n # (don't ask). Because clib are generated before extensions, we have to\n # explicitly add an extension which has generate_config_h and\n # generate_numpyconfig_h as sources *before* adding npymath.\n\n config.add_extension('_dummy',\n sources=[join('src', 'dummymodule.c'),\n generate_config_h,\n generate_numpyconfig_h,\n generate_numpy_api]\n )\n\n #######################################################################\n # npymath library #\n #######################################################################\n\n subst_dict = dict([(\"sep\", os.path.sep), (\"pkgname\", \"numpy.core\")])\n\n def get_mathlib_info(*args):\n # Another ugly hack: the mathlib info is known once build_src is run,\n # but we cannot use add_installed_pkg_config here either, so we only\n # update the substitution dictionary during npymath build\n config_cmd = config.get_config_cmd()\n\n # Check that the toolchain works, to fail early if it doesn't\n # (avoid late errors with MATHLIB which are confusing if the\n # compiler does not work).\n st = config_cmd.try_link('int main(void) { return 0;}')\n if not st:\n raise RuntimeError(\"Broken toolchain: cannot link a simple C program\")\n mlibs = check_mathlib(config_cmd)\n\n posix_mlib = ' '.join(['-l%s' % l for l in mlibs])\n msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])\n subst_dict[\"posix_mathlib\"] = posix_mlib\n subst_dict[\"msvc_mathlib\"] = msvc_mlib\n\n npymath_sources = [join('src', 'npymath', 'npy_math_internal.h.src'),\n join('src', 'npymath', 'npy_math.c'),\n join('src', 'npymath', 'ieee754.c.src'),\n join('src', 'npymath', 'npy_math_complex.c.src'),\n join('src', 'npymath', 'halffloat.c')\n ]\n\n # Must be true for CRT compilers but not MinGW/cygwin. 
See gh-9977.\n is_msvc = platform.system() == 'Windows'\n config.add_installed_library('npymath',\n sources=npymath_sources + [get_mathlib_info],\n install_dir='lib',\n build_info={\n 'include_dirs' : [], # empty list required for creating npy_math_internal.h\n 'extra_compiler_args' : (['/GL-'] if is_msvc else []),\n })\n config.add_npy_pkg_config(\"npymath.ini.in\", \"lib/npy-pkg-config\",\n subst_dict)\n config.add_npy_pkg_config(\"mlib.ini.in\", \"lib/npy-pkg-config\",\n subst_dict)\n\n #######################################################################\n # npysort library #\n #######################################################################\n\n # This library is created for the build but it is not installed\n npysort_sources = [join('src', 'common', 'npy_sort.h.src'),\n join('src', 'npysort', 'quicksort.c.src'),\n join('src', 'npysort', 'mergesort.c.src'),\n join('src', 'npysort', 'heapsort.c.src'),\n join('src', 'common', 'npy_partition.h.src'),\n join('src', 'npysort', 'selection.c.src'),\n join('src', 'common', 'npy_binsearch.h.src'),\n join('src', 'npysort', 'binsearch.c.src'),\n ]\n config.add_library('npysort',\n sources=npysort_sources,\n include_dirs=[])\n\n #######################################################################\n # multiarray_tests module #\n #######################################################################\n\n config.add_extension('_multiarray_tests',\n sources=[join('src', 'multiarray', '_multiarray_tests.c.src'),\n join('src', 'common', 'mem_overlap.c')],\n depends=[join('src', 'common', 'mem_overlap.h'),\n join('src', 'common', 'npy_extint128.h')],\n libraries=['npymath'])\n\n #######################################################################\n # _multiarray_umath module - common part #\n #######################################################################\n\n common_deps = [\n join('src', 'common', 'array_assign.h'),\n join('src', 'common', 'binop_override.h'),\n join('src', 'common', 'cblasfuncs.h'),\n join('src', 'common', 'lowlevel_strided_loops.h'),\n join('src', 'common', 'mem_overlap.h'),\n join('src', 'common', 'npy_config.h'),\n join('src', 'common', 'npy_ctypes.h'),\n join('src', 'common', 'npy_extint128.h'),\n join('src', 'common', 'npy_import.h'),\n join('src', 'common', 'npy_longdouble.h'),\n join('src', 'common', 'templ_common.h.src'),\n join('src', 'common', 'ucsnarrow.h'),\n join('src', 'common', 'ufunc_override.h'),\n join('src', 'common', 'umathmodule.h'),\n join('src', 'common', 'numpyos.h'),\n ]\n\n common_src = [\n join('src', 'common', 'array_assign.c'),\n join('src', 'common', 'mem_overlap.c'),\n join('src', 'common', 'npy_longdouble.c'),\n join('src', 'common', 'templ_common.h.src'),\n join('src', 'common', 'ucsnarrow.c'),\n join('src', 'common', 'ufunc_override.c'),\n join('src', 'common', 'numpyos.c'),\n ]\n\n blas_info = get_info('blas_opt', 0)\n if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []):\n extra_info = blas_info\n # These files are also in MANIFEST.in so that they are always in\n # the source distribution independently of HAVE_CBLAS.\n common_src.extend([join('src', 'common', 'cblasfuncs.c'),\n join('src', 'common', 'python_xerbla.c'),\n ])\n if uses_accelerate_framework(blas_info):\n common_src.extend(get_sgemv_fix())\n else:\n extra_info = {}\n\n #######################################################################\n # _multiarray_umath module - multiarray part #\n #######################################################################\n\n multiarray_deps = [\n 
join('src', 'multiarray', 'arrayobject.h'),\n join('src', 'multiarray', 'arraytypes.h'),\n join('src', 'multiarray', 'buffer.h'),\n join('src', 'multiarray', 'calculation.h'),\n join('src', 'multiarray', 'common.h'),\n join('src', 'multiarray', 'convert_datatype.h'),\n join('src', 'multiarray', 'convert.h'),\n join('src', 'multiarray', 'conversion_utils.h'),\n join('src', 'multiarray', 'ctors.h'),\n join('src', 'multiarray', 'descriptor.h'),\n join('src', 'multiarray', 'dragon4.h'),\n join('src', 'multiarray', 'getset.h'),\n join('src', 'multiarray', 'hashdescr.h'),\n join('src', 'multiarray', 'iterators.h'),\n join('src', 'multiarray', 'mapping.h'),\n join('src', 'multiarray', 'methods.h'),\n join('src', 'multiarray', 'multiarraymodule.h'),\n join('src', 'multiarray', 'nditer_impl.h'),\n join('src', 'multiarray', 'number.h'),\n join('src', 'multiarray', 'refcount.h'),\n join('src', 'multiarray', 'scalartypes.h'),\n join('src', 'multiarray', 'sequence.h'),\n join('src', 'multiarray', 'shape.h'),\n join('src', 'multiarray', 'strfuncs.h'),\n join('src', 'multiarray', 'typeinfo.h'),\n join('src', 'multiarray', 'usertypes.h'),\n join('src', 'multiarray', 'vdot.h'),\n join('include', 'numpy', 'arrayobject.h'),\n join('include', 'numpy', '_neighborhood_iterator_imp.h'),\n join('include', 'numpy', 'npy_endian.h'),\n join('include', 'numpy', 'arrayscalars.h'),\n join('include', 'numpy', 'noprefix.h'),\n join('include', 'numpy', 'npy_interrupt.h'),\n join('include', 'numpy', 'npy_3kcompat.h'),\n join('include', 'numpy', 'npy_math.h'),\n join('include', 'numpy', 'halffloat.h'),\n join('include', 'numpy', 'npy_common.h'),\n join('include', 'numpy', 'npy_os.h'),\n join('include', 'numpy', 'utils.h'),\n join('include', 'numpy', 'ndarrayobject.h'),\n join('include', 'numpy', 'npy_cpu.h'),\n join('include', 'numpy', 'numpyconfig.h'),\n join('include', 'numpy', 'ndarraytypes.h'),\n join('include', 'numpy', 'npy_1_7_deprecated_api.h'),\n # add library sources as distuils does not consider libraries\n # dependencies\n ] + npysort_sources + npymath_sources\n\n multiarray_src = [\n join('src', 'multiarray', 'alloc.c'),\n join('src', 'multiarray', 'arrayobject.c'),\n join('src', 'multiarray', 'arraytypes.c.src'),\n join('src', 'multiarray', 'array_assign_scalar.c'),\n join('src', 'multiarray', 'array_assign_array.c'),\n join('src', 'multiarray', 'buffer.c'),\n join('src', 'multiarray', 'calculation.c'),\n join('src', 'multiarray', 'compiled_base.c'),\n join('src', 'multiarray', 'common.c'),\n join('src', 'multiarray', 'convert.c'),\n join('src', 'multiarray', 'convert_datatype.c'),\n join('src', 'multiarray', 'conversion_utils.c'),\n join('src', 'multiarray', 'ctors.c'),\n join('src', 'multiarray', 'datetime.c'),\n join('src', 'multiarray', 'datetime_strings.c'),\n join('src', 'multiarray', 'datetime_busday.c'),\n join('src', 'multiarray', 'datetime_busdaycal.c'),\n join('src', 'multiarray', 'descriptor.c'),\n join('src', 'multiarray', 'dragon4.c'),\n join('src', 'multiarray', 'dtype_transfer.c'),\n join('src', 'multiarray', 'einsum.c.src'),\n join('src', 'multiarray', 'flagsobject.c'),\n join('src', 'multiarray', 'getset.c'),\n join('src', 'multiarray', 'hashdescr.c'),\n join('src', 'multiarray', 'item_selection.c'),\n join('src', 'multiarray', 'iterators.c'),\n join('src', 'multiarray', 'lowlevel_strided_loops.c.src'),\n join('src', 'multiarray', 'mapping.c'),\n join('src', 'multiarray', 'methods.c'),\n join('src', 'multiarray', 'multiarraymodule.c'),\n join('src', 'multiarray', 'nditer_templ.c.src'),\n 
join('src', 'multiarray', 'nditer_api.c'),\n join('src', 'multiarray', 'nditer_constr.c'),\n join('src', 'multiarray', 'nditer_pywrap.c'),\n join('src', 'multiarray', 'number.c'),\n join('src', 'multiarray', 'refcount.c'),\n join('src', 'multiarray', 'sequence.c'),\n join('src', 'multiarray', 'shape.c'),\n join('src', 'multiarray', 'scalarapi.c'),\n join('src', 'multiarray', 'scalartypes.c.src'),\n join('src', 'multiarray', 'strfuncs.c'),\n join('src', 'multiarray', 'temp_elide.c'),\n join('src', 'multiarray', 'typeinfo.c'),\n join('src', 'multiarray', 'usertypes.c'),\n join('src', 'multiarray', 'vdot.c'),\n ]\n\n #######################################################################\n # _multiarray_umath module - umath part #\n #######################################################################\n\n def generate_umath_c(ext, build_dir):\n target = join(build_dir, header_dir, '__umath_generated.c')\n dir = os.path.dirname(target)\n if not os.path.exists(dir):\n os.makedirs(dir)\n script = generate_umath_py\n if newer(script, target):\n f = open(target, 'w')\n f.write(generate_umath.make_code(generate_umath.defdict,\n generate_umath.__file__))\n f.close()\n return []\n\n umath_src = [\n join('src', 'umath', 'umathmodule.c'),\n join('src', 'umath', 'reduction.c'),\n join('src', 'umath', 'funcs.inc.src'),\n join('src', 'umath', 'simd.inc.src'),\n join('src', 'umath', 'loops.h.src'),\n join('src', 'umath', 'loops.c.src'),\n join('src', 'umath', 'ufunc_object.c'),\n join('src', 'umath', 'extobj.c'),\n join('src', 'umath', 'cpuid.c'),\n join('src', 'umath', 'scalarmath.c.src'),\n join('src', 'umath', 'ufunc_type_resolution.c'),\n join('src', 'umath', 'override.c'),\n ]\n\n umath_deps = [\n generate_umath_py,\n join('include', 'numpy', 'npy_math.h'),\n join('include', 'numpy', 'halffloat.h'),\n join('src', 'multiarray', 'common.h'),\n join('src', 'multiarray', 'number.h'),\n join('src', 'common', 'templ_common.h.src'),\n join('src', 'umath', 'simd.inc.src'),\n join('src', 'umath', 'override.h'),\n join(codegen_dir, 'generate_ufunc_api.py'),\n ]\n\n config.add_extension('_multiarray_umath',\n sources=multiarray_src + umath_src +\n npymath_sources + common_src +\n [generate_config_h,\n generate_numpyconfig_h,\n generate_numpy_api,\n join(codegen_dir, 'generate_numpy_api.py'),\n join('*.py'),\n generate_umath_c,\n generate_ufunc_api,\n ],\n depends=deps + multiarray_deps + umath_deps +\n common_deps,\n libraries=['npymath', 'npysort'],\n extra_info=extra_info)\n\n #######################################################################\n # umath_tests module #\n #######################################################################\n\n config.add_extension('_umath_tests',\n sources=[join('src', 'umath', '_umath_tests.c.src')])\n\n #######################################################################\n # custom rational dtype module #\n #######################################################################\n\n config.add_extension('_rational_tests',\n sources=[join('src', 'umath', '_rational_tests.c.src')])\n\n #######################################################################\n # struct_ufunc_test module #\n #######################################################################\n\n config.add_extension('_struct_ufunc_tests',\n sources=[join('src', 'umath', '_struct_ufunc_tests.c.src')])\n\n\n #######################################################################\n # operand_flag_tests module #\n #######################################################################\n\n 
config.add_extension('_operand_flag_tests',\n sources=[join('src', 'umath', '_operand_flag_tests.c.src')])\n\n config.add_data_dir('tests')\n config.add_data_dir('tests/data')\n\n config.make_svn_version_py()\n\n return config\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(configuration=configuration)\n",
"path": "numpy/core/setup.py"
}
] | [
{
"content": "from __future__ import division, print_function\n\nimport os\nimport sys\nimport pickle\nimport copy\nimport warnings\nimport platform\nfrom os.path import join\nfrom numpy.distutils import log\nfrom distutils.dep_util import newer\nfrom distutils.sysconfig import get_config_var\nfrom numpy._build_utils.apple_accelerate import (\n uses_accelerate_framework, get_sgemv_fix\n )\nfrom numpy.compat import npy_load_module\nfrom setup_common import *\n\n# Set to True to enable relaxed strides checking. This (mostly) means\n# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags.\nNPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', \"1\") != \"0\")\n\n# Put NPY_RELAXED_STRIDES_DEBUG=1 in the environment if you want numpy to use a\n# bogus value for affected strides in order to help smoke out bad stride usage\n# when relaxed stride checking is enabled.\nNPY_RELAXED_STRIDES_DEBUG = (os.environ.get('NPY_RELAXED_STRIDES_DEBUG', \"0\") != \"0\")\nNPY_RELAXED_STRIDES_DEBUG = NPY_RELAXED_STRIDES_DEBUG and NPY_RELAXED_STRIDES_CHECKING\n\n# XXX: ugly, we use a class to avoid calling twice some expensive functions in\n# config.h/numpyconfig.h. I don't see a better way because distutils force\n# config.h generation inside an Extension class, and as such sharing\n# configuration information between extensions is not easy.\n# Using a pickled-based memoize does not work because config_cmd is an instance\n# method, which cPickle does not like.\n#\n# Use pickle in all cases, as cPickle is gone in python3 and the difference\n# in time is only in build. -- Charles Harris, 2013-03-30\n\nclass CallOnceOnly(object):\n def __init__(self):\n self._check_types = None\n self._check_ieee_macros = None\n self._check_complex = None\n\n def check_types(self, *a, **kw):\n if self._check_types is None:\n out = check_types(*a, **kw)\n self._check_types = pickle.dumps(out)\n else:\n out = copy.deepcopy(pickle.loads(self._check_types))\n return out\n\n def check_ieee_macros(self, *a, **kw):\n if self._check_ieee_macros is None:\n out = check_ieee_macros(*a, **kw)\n self._check_ieee_macros = pickle.dumps(out)\n else:\n out = copy.deepcopy(pickle.loads(self._check_ieee_macros))\n return out\n\n def check_complex(self, *a, **kw):\n if self._check_complex is None:\n out = check_complex(*a, **kw)\n self._check_complex = pickle.dumps(out)\n else:\n out = copy.deepcopy(pickle.loads(self._check_complex))\n return out\n\ndef pythonlib_dir():\n \"\"\"return path where libpython* is.\"\"\"\n if sys.platform == 'win32':\n return os.path.join(sys.prefix, \"libs\")\n else:\n return get_config_var('LIBDIR')\n\ndef is_npy_no_signal():\n \"\"\"Return True if the NPY_NO_SIGNAL symbol must be defined in configuration\n header.\"\"\"\n return sys.platform == 'win32'\n\ndef is_npy_no_smp():\n \"\"\"Return True if the NPY_NO_SMP symbol must be defined in public\n header (when SMP support cannot be reliably enabled).\"\"\"\n # Perhaps a fancier check is in order here.\n # so that threads are only enabled if there\n # are actually multiple CPUS? 
-- but\n # threaded code can be nice even on a single\n # CPU so that long-calculating code doesn't\n # block.\n return 'NPY_NOSMP' in os.environ\n\ndef win32_checks(deflist):\n from numpy.distutils.misc_util import get_build_architecture\n a = get_build_architecture()\n\n # Distutils hack on AMD64 on windows\n print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' %\n (a, os.name, sys.platform))\n if a == 'AMD64':\n deflist.append('DISTUTILS_USE_SDK')\n\n # On win32, force long double format string to be 'g', not\n # 'Lg', since the MS runtime does not support long double whose\n # size is > sizeof(double)\n if a == \"Intel\" or a == \"AMD64\":\n deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING')\n\ndef check_math_capabilities(config, moredefs, mathlibs):\n def check_func(func_name):\n return config.check_func(func_name, libraries=mathlibs,\n decl=True, call=True)\n\n def check_funcs_once(funcs_name):\n decl = dict([(f, True) for f in funcs_name])\n st = config.check_funcs_once(funcs_name, libraries=mathlibs,\n decl=decl, call=decl)\n if st:\n moredefs.extend([(fname2def(f), 1) for f in funcs_name])\n return st\n\n def check_funcs(funcs_name):\n # Use check_funcs_once first, and if it does not work, test func per\n # func. Return success only if all the functions are available\n if not check_funcs_once(funcs_name):\n # Global check failed, check func per func\n for f in funcs_name:\n if check_func(f):\n moredefs.append((fname2def(f), 1))\n return 0\n else:\n return 1\n\n #use_msvc = config.check_decl(\"_MSC_VER\")\n\n if not check_funcs_once(MANDATORY_FUNCS):\n raise SystemError(\"One of the required function to build numpy is not\"\n \" available (the list is %s).\" % str(MANDATORY_FUNCS))\n\n # Standard functions which may not be available and for which we have a\n # replacement implementation. Note that some of these are C99 functions.\n\n # XXX: hack to circumvent cpp pollution from python: python put its\n # config.h in the public namespace, so we have a clash for the common\n # functions we test. We remove every function tested by python's\n # autoconf, hoping their own test are correct\n for f in OPTIONAL_STDFUNCS_MAYBE:\n if config.check_decl(fname2def(f),\n headers=[\"Python.h\", \"math.h\"]):\n OPTIONAL_STDFUNCS.remove(f)\n\n check_funcs(OPTIONAL_STDFUNCS)\n\n for h in OPTIONAL_HEADERS:\n if config.check_func(\"\", decl=False, call=False, headers=[h]):\n h = h.replace(\".\", \"_\").replace(os.path.sep, \"_\")\n moredefs.append((fname2def(h), 1))\n\n for tup in OPTIONAL_INTRINSICS:\n headers = None\n if len(tup) == 2:\n f, args, m = tup[0], tup[1], fname2def(tup[0])\n elif len(tup) == 3:\n f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[0])\n else:\n f, args, headers, m = tup[0], tup[1], [tup[2]], fname2def(tup[3])\n if config.check_func(f, decl=False, call=True, call_args=args,\n headers=headers):\n moredefs.append((m, 1))\n\n for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES:\n if config.check_gcc_function_attribute(dec, fn):\n moredefs.append((fname2def(fn), 1))\n\n for fn in OPTIONAL_VARIABLE_ATTRIBUTES:\n if config.check_gcc_variable_attribute(fn):\n m = fn.replace(\"(\", \"_\").replace(\")\", \"_\")\n moredefs.append((fname2def(m), 1))\n\n # C99 functions: float and long double versions\n check_funcs(C99_FUNCS_SINGLE)\n check_funcs(C99_FUNCS_EXTENDED)\n\ndef check_complex(config, mathlibs):\n priv = []\n pub = []\n\n try:\n if os.uname()[0] == \"Interix\":\n warnings.warn(\"Disabling broken complex support. 
See #1365\", stacklevel=2)\n return priv, pub\n except Exception:\n # os.uname not available on all platforms. blanket except ugly but safe\n pass\n\n # Check for complex support\n st = config.check_header('complex.h')\n if st:\n priv.append(('HAVE_COMPLEX_H', 1))\n pub.append(('NPY_USE_C99_COMPLEX', 1))\n\n for t in C99_COMPLEX_TYPES:\n st = config.check_type(t, headers=[\"complex.h\"])\n if st:\n pub.append(('NPY_HAVE_%s' % type2def(t), 1))\n\n def check_prec(prec):\n flist = [f + prec for f in C99_COMPLEX_FUNCS]\n decl = dict([(f, True) for f in flist])\n if not config.check_funcs_once(flist, call=decl, decl=decl,\n libraries=mathlibs):\n for f in flist:\n if config.check_func(f, call=True, decl=True,\n libraries=mathlibs):\n priv.append((fname2def(f), 1))\n else:\n priv.extend([(fname2def(f), 1) for f in flist])\n\n check_prec('')\n check_prec('f')\n check_prec('l')\n\n return priv, pub\n\ndef check_ieee_macros(config):\n priv = []\n pub = []\n\n macros = []\n\n def _add_decl(f):\n priv.append(fname2def(\"decl_%s\" % f))\n pub.append('NPY_%s' % fname2def(\"decl_%s\" % f))\n\n # XXX: hack to circumvent cpp pollution from python: python put its\n # config.h in the public namespace, so we have a clash for the common\n # functions we test. We remove every function tested by python's\n # autoconf, hoping their own test are correct\n _macros = [\"isnan\", \"isinf\", \"signbit\", \"isfinite\"]\n for f in _macros:\n py_symbol = fname2def(\"decl_%s\" % f)\n already_declared = config.check_decl(py_symbol,\n headers=[\"Python.h\", \"math.h\"])\n if already_declared:\n if config.check_macro_true(py_symbol,\n headers=[\"Python.h\", \"math.h\"]):\n pub.append('NPY_%s' % fname2def(\"decl_%s\" % f))\n else:\n macros.append(f)\n # Normally, isnan and isinf are macro (C99), but some platforms only have\n # func, or both func and macro version. Check for macro only, and define\n # replacement ones if not found.\n # Note: including Python.h is necessary because it modifies some math.h\n # definitions\n for f in macros:\n st = config.check_decl(f, headers=[\"Python.h\", \"math.h\"])\n if st:\n _add_decl(f)\n\n return priv, pub\n\ndef check_types(config_cmd, ext, build_dir):\n private_defines = []\n public_defines = []\n\n # Expected size (in number of bytes) for each type. This is an\n # optimization: those are only hints, and an exhaustive search for the size\n # is done if the hints are wrong.\n expected = {'short': [2], 'int': [4], 'long': [8, 4],\n 'float': [4], 'double': [8], 'long double': [16, 12, 8],\n 'Py_intptr_t': [8, 4], 'PY_LONG_LONG': [8], 'long long': [8],\n 'off_t': [8, 4]}\n\n # Check we have the python header (-dev* packages on Linux)\n result = config_cmd.check_header('Python.h')\n if not result:\n python = 'python'\n if '__pypy__' in sys.builtin_module_names:\n python = 'pypy'\n raise SystemError(\n \"Cannot compile 'Python.h'. 
Perhaps you need to \"\n \"install {0}-dev|{0}-devel.\".format(python))\n res = config_cmd.check_header(\"endian.h\")\n if res:\n private_defines.append(('HAVE_ENDIAN_H', 1))\n public_defines.append(('NPY_HAVE_ENDIAN_H', 1))\n res = config_cmd.check_header(\"sys/endian.h\")\n if res:\n private_defines.append(('HAVE_SYS_ENDIAN_H', 1))\n public_defines.append(('NPY_HAVE_SYS_ENDIAN_H', 1))\n\n # Check basic types sizes\n for type in ('short', 'int', 'long'):\n res = config_cmd.check_decl(\"SIZEOF_%s\" % sym2def(type), headers=[\"Python.h\"])\n if res:\n public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), \"SIZEOF_%s\" % sym2def(type)))\n else:\n res = config_cmd.check_type_size(type, expected=expected[type])\n if res >= 0:\n public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % type)\n\n for type in ('float', 'double', 'long double'):\n already_declared = config_cmd.check_decl(\"SIZEOF_%s\" % sym2def(type),\n headers=[\"Python.h\"])\n res = config_cmd.check_type_size(type, expected=expected[type])\n if res >= 0:\n public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))\n if not already_declared and not type == 'long double':\n private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % type)\n\n # Compute size of corresponding complex type: used to check that our\n # definition is binary compatible with C99 complex type (check done at\n # build time in npy_common.h)\n complex_def = \"struct {%s __x; %s __y;}\" % (type, type)\n res = config_cmd.check_type_size(complex_def,\n expected=[2 * x for x in expected[type]])\n if res >= 0:\n public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % complex_def)\n\n for type in ('Py_intptr_t', 'off_t'):\n res = config_cmd.check_type_size(type, headers=[\"Python.h\"],\n library_dirs=[pythonlib_dir()],\n expected=expected[type])\n\n if res >= 0:\n private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))\n public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % type)\n\n # We check declaration AND type because that's how distutils does it.\n if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']):\n res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'],\n library_dirs=[pythonlib_dir()],\n expected=expected['PY_LONG_LONG'])\n if res >= 0:\n private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))\n public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % 'PY_LONG_LONG')\n\n res = config_cmd.check_type_size('long long',\n expected=expected['long long'])\n if res >= 0:\n #private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res))\n public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res))\n else:\n raise SystemError(\"Checking sizeof (%s) failed !\" % 'long long')\n\n if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']):\n raise RuntimeError(\n \"Config wo CHAR_BIT is not supported\"\n \", please contact the maintainers\")\n\n return private_defines, public_defines\n\ndef check_mathlib(config_cmd):\n # Testing the C math library\n mathlibs = []\n mathlibs_choices = [[], ['m'], ['cpml']]\n mathlib = os.environ.get('MATHLIB')\n if mathlib:\n 
mathlibs_choices.insert(0, mathlib.split(','))\n for libs in mathlibs_choices:\n if config_cmd.check_func(\"exp\", libraries=libs, decl=True, call=True):\n mathlibs = libs\n break\n else:\n raise EnvironmentError(\"math library missing; rerun \"\n \"setup.py after setting the \"\n \"MATHLIB env variable\")\n return mathlibs\n\ndef visibility_define(config):\n \"\"\"Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty\n string).\"\"\"\n hide = '__attribute__((visibility(\"hidden\")))'\n if config.check_gcc_function_attribute(hide, 'hideme'):\n return hide\n else:\n return ''\n\ndef configuration(parent_package='',top_path=None):\n from numpy.distutils.misc_util import Configuration, dot_join\n from numpy.distutils.system_info import get_info\n\n config = Configuration('core', parent_package, top_path)\n local_dir = config.local_path\n codegen_dir = join(local_dir, 'code_generators')\n\n if is_released(config):\n warnings.simplefilter('error', MismatchCAPIWarning)\n\n # Check whether we have a mismatch between the set C API VERSION and the\n # actual C API VERSION\n check_api_version(C_API_VERSION, codegen_dir)\n\n generate_umath_py = join(codegen_dir, 'generate_umath.py')\n n = dot_join(config.name, 'generate_umath')\n generate_umath = npy_load_module('_'.join(n.split('.')),\n generate_umath_py, ('.py', 'U', 1))\n\n header_dir = 'include/numpy' # this is relative to config.path_in_package\n\n cocache = CallOnceOnly()\n\n def generate_config_h(ext, build_dir):\n target = join(build_dir, header_dir, 'config.h')\n d = os.path.dirname(target)\n if not os.path.exists(d):\n os.makedirs(d)\n\n if newer(__file__, target):\n config_cmd = config.get_config_cmd()\n log.info('Generating %s', target)\n\n # Check sizeof\n moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)\n\n # Check math library and C99 math funcs availability\n mathlibs = check_mathlib(config_cmd)\n moredefs.append(('MATHLIB', ','.join(mathlibs)))\n\n check_math_capabilities(config_cmd, moredefs, mathlibs)\n moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])\n moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])\n\n # Signal check\n if is_npy_no_signal():\n moredefs.append('__NPY_PRIVATE_NO_SIGNAL')\n\n # Windows checks\n if sys.platform == 'win32' or os.name == 'nt':\n win32_checks(moredefs)\n\n # C99 restrict keyword\n moredefs.append(('NPY_RESTRICT', config_cmd.check_restrict()))\n\n # Inline check\n inline = config_cmd.check_inline()\n\n # Use relaxed stride checking\n if NPY_RELAXED_STRIDES_CHECKING:\n moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))\n\n # Use bogus stride debug aid when relaxed strides are enabled\n if NPY_RELAXED_STRIDES_DEBUG:\n moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1))\n\n # Get long double representation\n rep = check_long_double_representation(config_cmd)\n moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))\n\n # Py3K check\n if sys.version_info[0] == 3:\n moredefs.append(('NPY_PY3K', 1))\n\n # Generate the config.h file from moredefs\n target_f = open(target, 'w')\n for d in moredefs:\n if isinstance(d, str):\n target_f.write('#define %s\\n' % (d))\n else:\n target_f.write('#define %s %s\\n' % (d[0], d[1]))\n\n # define inline to our keyword, or nothing\n target_f.write('#ifndef __cplusplus\\n')\n if inline == 'inline':\n target_f.write('/* #undef inline */\\n')\n else:\n target_f.write('#define inline %s\\n' % inline)\n target_f.write('#endif\\n')\n\n # add the guard to make sure config.h is never included directly,\n # but always through 
npy_config.h\n target_f.write(\"\"\"\n#ifndef _NPY_NPY_CONFIG_H_\n#error config.h should never be included directly, include npy_config.h instead\n#endif\n\"\"\")\n\n target_f.close()\n print('File:', target)\n target_f = open(target)\n print(target_f.read())\n target_f.close()\n print('EOF')\n else:\n mathlibs = []\n target_f = open(target)\n for line in target_f:\n s = '#define MATHLIB'\n if line.startswith(s):\n value = line[len(s):].strip()\n if value:\n mathlibs.extend(value.split(','))\n target_f.close()\n\n # Ugly: this can be called within a library and not an extension,\n # in which case there is no libraries attributes (and none is\n # needed).\n if hasattr(ext, 'libraries'):\n ext.libraries.extend(mathlibs)\n\n incl_dir = os.path.dirname(target)\n if incl_dir not in config.numpy_include_dirs:\n config.numpy_include_dirs.append(incl_dir)\n\n return target\n\n def generate_numpyconfig_h(ext, build_dir):\n \"\"\"Depends on config.h: generate_config_h has to be called before !\"\"\"\n # put common include directory in build_dir on search path\n # allows using code generation in headers headers\n config.add_include_dirs(join(build_dir, \"src\", \"common\"))\n config.add_include_dirs(join(build_dir, \"src\", \"npymath\"))\n\n target = join(build_dir, header_dir, '_numpyconfig.h')\n d = os.path.dirname(target)\n if not os.path.exists(d):\n os.makedirs(d)\n if newer(__file__, target):\n config_cmd = config.get_config_cmd()\n log.info('Generating %s', target)\n\n # Check sizeof\n ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)\n\n if is_npy_no_signal():\n moredefs.append(('NPY_NO_SIGNAL', 1))\n\n if is_npy_no_smp():\n moredefs.append(('NPY_NO_SMP', 1))\n else:\n moredefs.append(('NPY_NO_SMP', 0))\n\n mathlibs = check_mathlib(config_cmd)\n moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])\n moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])\n\n if NPY_RELAXED_STRIDES_CHECKING:\n moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))\n\n if NPY_RELAXED_STRIDES_DEBUG:\n moredefs.append(('NPY_RELAXED_STRIDES_DEBUG', 1))\n\n # Check whether we can use inttypes (C99) formats\n if config_cmd.check_decl('PRIdPTR', headers=['inttypes.h']):\n moredefs.append(('NPY_USE_C99_FORMATS', 1))\n\n # visibility check\n hidden_visibility = visibility_define(config_cmd)\n moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility))\n\n # Add the C API/ABI versions\n moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))\n moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))\n\n # Add moredefs to header\n target_f = open(target, 'w')\n for d in moredefs:\n if isinstance(d, str):\n target_f.write('#define %s\\n' % (d))\n else:\n target_f.write('#define %s %s\\n' % (d[0], d[1]))\n\n # Define __STDC_FORMAT_MACROS\n target_f.write(\"\"\"\n#ifndef __STDC_FORMAT_MACROS\n#define __STDC_FORMAT_MACROS 1\n#endif\n\"\"\")\n target_f.close()\n\n # Dump the numpyconfig.h header to stdout\n print('File: %s' % target)\n target_f = open(target)\n print(target_f.read())\n target_f.close()\n print('EOF')\n config.add_data_files((header_dir, target))\n return target\n\n def generate_api_func(module_name):\n def generate_api(ext, build_dir):\n script = join(codegen_dir, module_name + '.py')\n sys.path.insert(0, codegen_dir)\n try:\n m = __import__(module_name)\n log.info('executing %s', script)\n h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir))\n finally:\n del sys.path[0]\n config.add_data_files((header_dir, h_file),\n (header_dir, 
doc_file))\n return (h_file,)\n return generate_api\n\n generate_numpy_api = generate_api_func('generate_numpy_api')\n generate_ufunc_api = generate_api_func('generate_ufunc_api')\n\n config.add_include_dirs(join(local_dir, \"src\", \"common\"))\n config.add_include_dirs(join(local_dir, \"src\"))\n config.add_include_dirs(join(local_dir))\n\n config.add_data_files('include/numpy/*.h')\n config.add_include_dirs(join('src', 'npymath'))\n config.add_include_dirs(join('src', 'multiarray'))\n config.add_include_dirs(join('src', 'umath'))\n config.add_include_dirs(join('src', 'npysort'))\n\n config.add_define_macros([(\"NPY_INTERNAL_BUILD\", \"1\")]) # this macro indicates that Numpy build is in process\n config.add_define_macros([(\"HAVE_NPY_CONFIG_H\", \"1\")])\n if sys.platform[:3] == \"aix\":\n config.add_define_macros([(\"_LARGE_FILES\", None)])\n else:\n config.add_define_macros([(\"_FILE_OFFSET_BITS\", \"64\")])\n config.add_define_macros([('_LARGEFILE_SOURCE', '1')])\n config.add_define_macros([('_LARGEFILE64_SOURCE', '1')])\n\n config.numpy_include_dirs.extend(config.paths('include'))\n\n deps = [join('src', 'npymath', '_signbit.c'),\n join('include', 'numpy', '*object.h'),\n join(codegen_dir, 'genapi.py'),\n ]\n\n #######################################################################\n # dummy module #\n #######################################################################\n\n # npymath needs the config.h and numpyconfig.h files to be generated, but\n # build_clib cannot handle generate_config_h and generate_numpyconfig_h\n # (don't ask). Because clib are generated before extensions, we have to\n # explicitly add an extension which has generate_config_h and\n # generate_numpyconfig_h as sources *before* adding npymath.\n\n config.add_extension('_dummy',\n sources=[join('src', 'dummymodule.c'),\n generate_config_h,\n generate_numpyconfig_h,\n generate_numpy_api]\n )\n\n #######################################################################\n # npymath library #\n #######################################################################\n\n subst_dict = dict([(\"sep\", os.path.sep), (\"pkgname\", \"numpy.core\")])\n\n def get_mathlib_info(*args):\n # Another ugly hack: the mathlib info is known once build_src is run,\n # but we cannot use add_installed_pkg_config here either, so we only\n # update the substitution dictionary during npymath build\n config_cmd = config.get_config_cmd()\n\n # Check that the toolchain works, to fail early if it doesn't\n # (avoid late errors with MATHLIB which are confusing if the\n # compiler does not work).\n st = config_cmd.try_link('int main(void) { return 0;}')\n if not st:\n raise RuntimeError(\"Broken toolchain: cannot link a simple C program\")\n mlibs = check_mathlib(config_cmd)\n\n posix_mlib = ' '.join(['-l%s' % l for l in mlibs])\n msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])\n subst_dict[\"posix_mathlib\"] = posix_mlib\n subst_dict[\"msvc_mathlib\"] = msvc_mlib\n\n npymath_sources = [join('src', 'npymath', 'npy_math_internal.h.src'),\n join('src', 'npymath', 'npy_math.c'),\n join('src', 'npymath', 'ieee754.c.src'),\n join('src', 'npymath', 'npy_math_complex.c.src'),\n join('src', 'npymath', 'halffloat.c')\n ]\n\n # Must be true for CRT compilers but not MinGW/cygwin. 
See gh-9977.\n is_msvc = platform.system() == 'Windows'\n config.add_installed_library('npymath',\n sources=npymath_sources + [get_mathlib_info],\n install_dir='lib',\n build_info={\n 'include_dirs' : [], # empty list required for creating npy_math_internal.h\n 'extra_compiler_args' : (['/GL-'] if is_msvc else []),\n })\n config.add_npy_pkg_config(\"npymath.ini.in\", \"lib/npy-pkg-config\",\n subst_dict)\n config.add_npy_pkg_config(\"mlib.ini.in\", \"lib/npy-pkg-config\",\n subst_dict)\n\n #######################################################################\n # npysort library #\n #######################################################################\n\n # This library is created for the build but it is not installed\n npysort_sources = [join('src', 'common', 'npy_sort.h.src'),\n join('src', 'npysort', 'quicksort.c.src'),\n join('src', 'npysort', 'mergesort.c.src'),\n join('src', 'npysort', 'heapsort.c.src'),\n join('src', 'common', 'npy_partition.h.src'),\n join('src', 'npysort', 'selection.c.src'),\n join('src', 'common', 'npy_binsearch.h.src'),\n join('src', 'npysort', 'binsearch.c.src'),\n ]\n config.add_library('npysort',\n sources=npysort_sources,\n include_dirs=[])\n\n #######################################################################\n # multiarray_tests module #\n #######################################################################\n\n config.add_extension('_multiarray_tests',\n sources=[join('src', 'multiarray', '_multiarray_tests.c.src'),\n join('src', 'common', 'mem_overlap.c')],\n depends=[join('src', 'common', 'mem_overlap.h'),\n join('src', 'common', 'npy_extint128.h')],\n libraries=['npymath'])\n\n #######################################################################\n # _multiarray_umath module - common part #\n #######################################################################\n\n common_deps = [\n join('src', 'common', 'array_assign.h'),\n join('src', 'common', 'binop_override.h'),\n join('src', 'common', 'cblasfuncs.h'),\n join('src', 'common', 'lowlevel_strided_loops.h'),\n join('src', 'common', 'mem_overlap.h'),\n join('src', 'common', 'npy_config.h'),\n join('src', 'common', 'npy_ctypes.h'),\n join('src', 'common', 'npy_extint128.h'),\n join('src', 'common', 'npy_import.h'),\n join('src', 'common', 'npy_longdouble.h'),\n join('src', 'common', 'templ_common.h.src'),\n join('src', 'common', 'ucsnarrow.h'),\n join('src', 'common', 'ufunc_override.h'),\n join('src', 'common', 'umathmodule.h'),\n join('src', 'common', 'numpyos.h'),\n ]\n\n common_src = [\n join('src', 'common', 'array_assign.c'),\n join('src', 'common', 'mem_overlap.c'),\n join('src', 'common', 'npy_longdouble.c'),\n join('src', 'common', 'templ_common.h.src'),\n join('src', 'common', 'ucsnarrow.c'),\n join('src', 'common', 'ufunc_override.c'),\n join('src', 'common', 'numpyos.c'),\n ]\n\n blas_info = get_info('blas_opt', 0)\n if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []):\n extra_info = blas_info\n # These files are also in MANIFEST.in so that they are always in\n # the source distribution independently of HAVE_CBLAS.\n common_src.extend([join('src', 'common', 'cblasfuncs.c'),\n join('src', 'common', 'python_xerbla.c'),\n ])\n if uses_accelerate_framework(blas_info):\n common_src.extend(get_sgemv_fix())\n else:\n extra_info = {}\n\n #######################################################################\n # _multiarray_umath module - multiarray part #\n #######################################################################\n\n multiarray_deps = [\n 
join('src', 'multiarray', 'arrayobject.h'),\n join('src', 'multiarray', 'arraytypes.h'),\n join('src', 'multiarray', 'buffer.h'),\n join('src', 'multiarray', 'calculation.h'),\n join('src', 'multiarray', 'common.h'),\n join('src', 'multiarray', 'convert_datatype.h'),\n join('src', 'multiarray', 'convert.h'),\n join('src', 'multiarray', 'conversion_utils.h'),\n join('src', 'multiarray', 'ctors.h'),\n join('src', 'multiarray', 'descriptor.h'),\n join('src', 'multiarray', 'dragon4.h'),\n join('src', 'multiarray', 'getset.h'),\n join('src', 'multiarray', 'hashdescr.h'),\n join('src', 'multiarray', 'iterators.h'),\n join('src', 'multiarray', 'mapping.h'),\n join('src', 'multiarray', 'methods.h'),\n join('src', 'multiarray', 'multiarraymodule.h'),\n join('src', 'multiarray', 'nditer_impl.h'),\n join('src', 'multiarray', 'number.h'),\n join('src', 'multiarray', 'refcount.h'),\n join('src', 'multiarray', 'scalartypes.h'),\n join('src', 'multiarray', 'sequence.h'),\n join('src', 'multiarray', 'shape.h'),\n join('src', 'multiarray', 'strfuncs.h'),\n join('src', 'multiarray', 'typeinfo.h'),\n join('src', 'multiarray', 'usertypes.h'),\n join('src', 'multiarray', 'vdot.h'),\n join('include', 'numpy', 'arrayobject.h'),\n join('include', 'numpy', '_neighborhood_iterator_imp.h'),\n join('include', 'numpy', 'npy_endian.h'),\n join('include', 'numpy', 'arrayscalars.h'),\n join('include', 'numpy', 'noprefix.h'),\n join('include', 'numpy', 'npy_interrupt.h'),\n join('include', 'numpy', 'npy_3kcompat.h'),\n join('include', 'numpy', 'npy_math.h'),\n join('include', 'numpy', 'halffloat.h'),\n join('include', 'numpy', 'npy_common.h'),\n join('include', 'numpy', 'npy_os.h'),\n join('include', 'numpy', 'utils.h'),\n join('include', 'numpy', 'ndarrayobject.h'),\n join('include', 'numpy', 'npy_cpu.h'),\n join('include', 'numpy', 'numpyconfig.h'),\n join('include', 'numpy', 'ndarraytypes.h'),\n join('include', 'numpy', 'npy_1_7_deprecated_api.h'),\n # add library sources as distuils does not consider libraries\n # dependencies\n ] + npysort_sources + npymath_sources\n\n multiarray_src = [\n join('src', 'multiarray', 'alloc.c'),\n join('src', 'multiarray', 'arrayobject.c'),\n join('src', 'multiarray', 'arraytypes.c.src'),\n join('src', 'multiarray', 'array_assign_scalar.c'),\n join('src', 'multiarray', 'array_assign_array.c'),\n join('src', 'multiarray', 'buffer.c'),\n join('src', 'multiarray', 'calculation.c'),\n join('src', 'multiarray', 'compiled_base.c'),\n join('src', 'multiarray', 'common.c'),\n join('src', 'multiarray', 'convert.c'),\n join('src', 'multiarray', 'convert_datatype.c'),\n join('src', 'multiarray', 'conversion_utils.c'),\n join('src', 'multiarray', 'ctors.c'),\n join('src', 'multiarray', 'datetime.c'),\n join('src', 'multiarray', 'datetime_strings.c'),\n join('src', 'multiarray', 'datetime_busday.c'),\n join('src', 'multiarray', 'datetime_busdaycal.c'),\n join('src', 'multiarray', 'descriptor.c'),\n join('src', 'multiarray', 'dragon4.c'),\n join('src', 'multiarray', 'dtype_transfer.c'),\n join('src', 'multiarray', 'einsum.c.src'),\n join('src', 'multiarray', 'flagsobject.c'),\n join('src', 'multiarray', 'getset.c'),\n join('src', 'multiarray', 'hashdescr.c'),\n join('src', 'multiarray', 'item_selection.c'),\n join('src', 'multiarray', 'iterators.c'),\n join('src', 'multiarray', 'lowlevel_strided_loops.c.src'),\n join('src', 'multiarray', 'mapping.c'),\n join('src', 'multiarray', 'methods.c'),\n join('src', 'multiarray', 'multiarraymodule.c'),\n join('src', 'multiarray', 'nditer_templ.c.src'),\n 
join('src', 'multiarray', 'nditer_api.c'),\n join('src', 'multiarray', 'nditer_constr.c'),\n join('src', 'multiarray', 'nditer_pywrap.c'),\n join('src', 'multiarray', 'number.c'),\n join('src', 'multiarray', 'refcount.c'),\n join('src', 'multiarray', 'sequence.c'),\n join('src', 'multiarray', 'shape.c'),\n join('src', 'multiarray', 'scalarapi.c'),\n join('src', 'multiarray', 'scalartypes.c.src'),\n join('src', 'multiarray', 'strfuncs.c'),\n join('src', 'multiarray', 'temp_elide.c'),\n join('src', 'multiarray', 'typeinfo.c'),\n join('src', 'multiarray', 'usertypes.c'),\n join('src', 'multiarray', 'vdot.c'),\n ]\n\n #######################################################################\n # _multiarray_umath module - umath part #\n #######################################################################\n\n def generate_umath_c(ext, build_dir):\n target = join(build_dir, header_dir, '__umath_generated.c')\n dir = os.path.dirname(target)\n if not os.path.exists(dir):\n os.makedirs(dir)\n script = generate_umath_py\n if newer(script, target):\n f = open(target, 'w')\n f.write(generate_umath.make_code(generate_umath.defdict,\n generate_umath.__file__))\n f.close()\n return []\n\n umath_src = [\n join('src', 'umath', 'umathmodule.c'),\n join('src', 'umath', 'reduction.c'),\n join('src', 'umath', 'funcs.inc.src'),\n join('src', 'umath', 'simd.inc.src'),\n join('src', 'umath', 'loops.h.src'),\n join('src', 'umath', 'loops.c.src'),\n join('src', 'umath', 'ufunc_object.c'),\n join('src', 'umath', 'extobj.c'),\n join('src', 'umath', 'cpuid.c'),\n join('src', 'umath', 'scalarmath.c.src'),\n join('src', 'umath', 'ufunc_type_resolution.c'),\n join('src', 'umath', 'override.c'),\n ]\n\n umath_deps = [\n generate_umath_py,\n join('include', 'numpy', 'npy_math.h'),\n join('include', 'numpy', 'halffloat.h'),\n join('src', 'multiarray', 'common.h'),\n join('src', 'multiarray', 'number.h'),\n join('src', 'common', 'templ_common.h.src'),\n join('src', 'umath', 'simd.inc.src'),\n join('src', 'umath', 'override.h'),\n join(codegen_dir, 'generate_ufunc_api.py'),\n ]\n\n config.add_extension('_multiarray_umath',\n sources=multiarray_src + umath_src +\n npymath_sources + common_src +\n [generate_config_h,\n generate_numpyconfig_h,\n generate_numpy_api,\n join(codegen_dir, 'generate_numpy_api.py'),\n join('*.py'),\n generate_umath_c,\n generate_ufunc_api,\n ],\n depends=deps + multiarray_deps + umath_deps +\n common_deps,\n libraries=['npymath', 'npysort'],\n extra_info=extra_info)\n\n #######################################################################\n # umath_tests module #\n #######################################################################\n\n config.add_extension('_umath_tests',\n sources=[join('src', 'umath', '_umath_tests.c.src')])\n\n #######################################################################\n # custom rational dtype module #\n #######################################################################\n\n config.add_extension('_rational_tests',\n sources=[join('src', 'umath', '_rational_tests.c.src')])\n\n #######################################################################\n # struct_ufunc_test module #\n #######################################################################\n\n config.add_extension('_struct_ufunc_tests',\n sources=[join('src', 'umath', '_struct_ufunc_tests.c.src')])\n\n\n #######################################################################\n # operand_flag_tests module #\n #######################################################################\n\n 
config.add_extension('_operand_flag_tests',\n sources=[join('src', 'umath', '_operand_flag_tests.c.src')])\n\n config.add_data_dir('tests')\n config.add_data_dir('tests/data')\n\n config.make_svn_version_py()\n\n return config\n\nif __name__ == '__main__':\n from numpy.distutils.core import setup\n setup(configuration=configuration)\n",
"path": "numpy/core/setup.py"
}
] | diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index efcacfb8e5fc..23a9e268bbaf 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -379,8 +379,9 @@ def check_mathlib(config_cmd):
def visibility_define(config):
"""Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty
string)."""
- if config.check_compiler_gcc4():
- return '__attribute__((visibility("hidden")))'
+ hide = '__attribute__((visibility("hidden")))'
+ if config.check_gcc_function_attribute(hide, 'hideme'):
+ return hide
else:
return ''
diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src
index f05ee14313ab..2a827557215a 100644
--- a/numpy/core/src/multiarray/_multiarray_tests.c.src
+++ b/numpy/core/src/multiarray/_multiarray_tests.c.src
@@ -2066,3 +2066,9 @@ init_multiarray_tests(void)
}
return RETVAL;
}
+
+NPY_NO_EXPORT int
+test_not_exported(void)
+{
+ return 1;
+}
diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py
index 856cca8eb2fb..194f8ecbb8a5 100644
--- a/numpy/tests/test_public_api.py
+++ b/numpy/tests/test_public_api.py
@@ -4,7 +4,10 @@
import numpy as np
import pytest
-
+try:
+ import ctypes
+except ImportError:
+ ctypes = None
def check_dir(module, module_name=None):
"""Returns a mapping of all objects with the wrong __module__ attribute."""
@@ -75,3 +78,12 @@ def test_numpy_linalg():
def test_numpy_fft():
bad_results = check_dir(np.fft)
assert bad_results == {}
+
+@pytest.mark.skipif(ctypes is None,
+ reason="ctypes not available in this python")
+def test_NPY_NO_EXPORT():
+ cdll = ctypes.CDLL(np.core._multiarray_tests.__file__)
+ # Make sure an arbitrary NPY_NO_EXPORT function is actually hidden
+ f = getattr(cdll, 'test_not_exported', None)
+ assert f is None, ("'test_not_exported' is mistakenly exported, "
+ "NPY_NO_EXPORT does not work")
| BUG: functions marked with NPY_NO_EXPORT still are exported
Steps to reproduce (on linux):
- git checkout
- `python setup.py build_ext`
- choose a random function marked with the `NPY_NO_EXPORT` macro, for instance [`datetime_metadata_divides`](https://github.com/numpy/numpy/blob/v1.15.4/numpy/core/src/multiarray/_datetime.h#L108) and ~call
`nm build/lib*/numpy/core/_multiarray_umath*.so |grep datetime_metadata_divides`~ check that the function is not exported:
```
import ctypes, numpy as np
dll = ctypes.CDLL(np.core._multiarray_umath.__file__)
print(getattr(dll, "datetime_metadata_divides", None))
```
Note that the symbol is found: `getattr` returns a foreign function object instead of `None`, which it should not. I think the problem is in the `visibility_define` [function](https://github.com/numpy/numpy/blob/v1.15.4/numpy/core/setup.py#L379), which only hides functions for gcc 4.
Edit: use ctypes to check for export
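For reference, a slightly more general version of the ctypes probe above — a minimal sketch, not from the original report, that assumes a Linux build where `ctypes.CDLL` can load the extension and uses `datetime_metadata_divides` purely as an example `NPY_NO_EXPORT` symbol:
```python
# Sketch: probe several symbols that are expected to be hidden. getattr()
# returning a foreign-function object means the symbol is still exported;
# None means the hidden-visibility attribute (NPY_NO_EXPORT) took effect.
import ctypes

import numpy as np

dll = ctypes.CDLL(np.core._multiarray_umath.__file__)
for name in ["datetime_metadata_divides"]:  # add other NPY_NO_EXPORT names here
    symbol = getattr(dll, name, None)
    print(name, "exported (bug)" if symbol is not None else "hidden (ok)")
```
With the `visibility_define` fix applied, every listed name should report `hidden (ok)`, matching the `test_NPY_NO_EXPORT` check added in the patch.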
|
zulip__zulip-21726 | [
{
"content": "import re\nfrom typing import List, Match, Tuple\n\nfrom bs4 import BeautifulSoup\n\n# The phrases in this list will be ignored. The longest phrase is\n# tried first; this removes the chance of smaller phrases changing\n# the text before longer phrases are tried.\n# The errors shown by `tools/check-capitalization` can be added to\n# this list without any modification.\nIGNORED_PHRASES = [\n # Proper nouns and acronyms\n r\"API\",\n r\"APNS\",\n r\"Botserver\",\n r\"Cookie Bot\",\n r\"DevAuthBackend\",\n r\"GCM\",\n r\"GitHub\",\n r\"Gravatar\",\n r\"Help Center\",\n r\"HTTP\",\n r\"ID\",\n r\"IDs\",\n r\"IP\",\n r\"JSON\",\n r\"Kerberos\",\n r\"LDAP\",\n r\"Markdown\",\n r\"OTP\",\n r\"Pivotal\",\n r\"PM\",\n r\"PMs\",\n r\"Slack\",\n r\"Google\",\n r\"Terms of Service\",\n r\"Tuesday\",\n r\"URL\",\n r\"UUID\",\n r\"Webathena\",\n r\"WordPress\",\n r\"Zephyr\",\n r\"Zoom\",\n r\"Zulip\",\n r\"Zulip Server\",\n r\"Zulip Account Security\",\n r\"Zulip Security\",\n r\"Zulip Cloud Standard\",\n r\"BigBlueButton\",\n # Code things\n r\"\\.zuliprc\",\n # BeautifulSoup will remove <z-user> which is horribly confusing,\n # so we need more of the sentence.\n r\"<z-user></z-user> will have the same role\",\n # Things using \"I\"\n r\"I understand\",\n r\"I'm\",\n r\"I've\",\n # Specific short words\n r\"beta\",\n r\"and\",\n r\"bot\",\n r\"e\\.g\\.\",\n r\"enabled\",\n r\"signups\",\n # Placeholders\n r\"keyword\",\n r\"streamname\",\n r\"user@example\\.com\",\n # Fragments of larger strings\n (r\"your subscriptions on your Streams page\"),\n r\"Add global time<br />Everyone sees global times in their own time zone\\.\",\n r\"user\",\n r\"an unknown operating system\",\n r\"Go to Settings\",\n # SPECIAL CASES\n # Because topics usually are lower-case, this would look weird if it were capitalized\n r\"more topics\",\n # Used alone in a parenthetical where capitalized looks worse.\n r\"^deprecated$\",\n # Capital 'i' looks weird in reminders popover\n r\"in 1 hour\",\n r\"in 20 minutes\",\n r\"in 3 hours\",\n # these are used as topics\n r\"^new streams$\",\n r\"^stream events$\",\n # These are used as example short names (e.g. an uncapitalized context):\n r\"^marketing$\",\n r\"^cookie$\",\n # Used to refer custom time limits\n r\"\\bN\\b\",\n # Capital c feels obtrusive in clear status option\n r\"clear\",\n r\"group private messages with \\{recipient\\}\",\n r\"private messages with \\{recipient\\}\",\n r\"private messages with yourself\",\n r\"GIF\",\n # Emoji name placeholder\n r\"leafy green vegetable\",\n # Subdomain placeholder\n r\"your-organization-url\",\n # Used in invite modal\n r\"or\",\n # Used in GIPHY popover.\n r\"GIFs\",\n r\"GIPHY\",\n # Used in our case studies\n r\"Technical University of Munich\",\n r\"University of California San Diego\",\n # Used in stream creation form\n r\"email hidden\",\n # Use in compose box.\n r\"to send\",\n r\"to add a new line\",\n # Used in showing Notification Bot read receipts message\n \"Notification Bot\",\n # Used in presence_enabled setting label\n r\"invisible mode off\",\n # Typeahead suggestions for \"Pronouns\" custom field type.\n r\"he/him\",\n r\"she/her\",\n r\"they/them\",\n]\n\n# Sort regexes in descending order of their lengths. As a result, the\n# longer phrases will be ignored first.\nIGNORED_PHRASES.sort(key=lambda regex: len(regex), reverse=True)\n\n# Compile regexes to improve performance. This also extracts the\n# text using BeautifulSoup and then removes extra whitespaces from\n# it. 
This step enables us to add HTML in our regexes directly.\nCOMPILED_IGNORED_PHRASES = [\n re.compile(\" \".join(BeautifulSoup(regex, \"lxml\").text.split())) for regex in IGNORED_PHRASES\n]\n\nSPLIT_BOUNDARY = \"?.!\" # Used to split string into sentences.\nSPLIT_BOUNDARY_REGEX = re.compile(rf\"[{SPLIT_BOUNDARY}]\")\n\n# Regexes which check capitalization in sentences.\nDISALLOWED = [\n r\"^[a-z](?!\\})\", # Checks if the sentence starts with a lower case character.\n r\"^[A-Z][a-z]+[\\sa-z0-9]+[A-Z]\", # Checks if an upper case character exists\n # after a lower case character when the first character is in upper case.\n]\nDISALLOWED_REGEX = re.compile(r\"|\".join(DISALLOWED))\n\nBANNED_WORDS = {\n \"realm\": \"The term realm should not appear in user-facing strings. Use organization instead.\",\n}\n\n\ndef get_safe_phrase(phrase: str) -> str:\n \"\"\"\n Safe phrase is in lower case and doesn't contain characters which can\n conflict with split boundaries. All conflicting characters are replaced\n with low dash (_).\n \"\"\"\n phrase = SPLIT_BOUNDARY_REGEX.sub(\"_\", phrase)\n return phrase.lower()\n\n\ndef replace_with_safe_phrase(matchobj: Match[str]) -> str:\n \"\"\"\n The idea is to convert IGNORED_PHRASES into safe phrases, see\n `get_safe_phrase()` function. The only exception is when the\n IGNORED_PHRASE is at the start of the text or after a split\n boundary; in this case, we change the first letter of the phrase\n to upper case.\n \"\"\"\n ignored_phrase = matchobj.group(0)\n safe_string = get_safe_phrase(ignored_phrase)\n\n start_index = matchobj.start()\n complete_string = matchobj.string\n\n is_string_start = start_index == 0\n # We expect that there will be one space between split boundary\n # and the next word.\n punctuation = complete_string[max(start_index - 2, 0)]\n is_after_split_boundary = punctuation in SPLIT_BOUNDARY\n if is_string_start or is_after_split_boundary:\n return safe_string.capitalize()\n\n return safe_string\n\n\ndef get_safe_text(text: str) -> str:\n \"\"\"\n This returns text which is rendered by BeautifulSoup and is in the\n form that can be split easily and has all IGNORED_PHRASES processed.\n \"\"\"\n soup = BeautifulSoup(text, \"lxml\")\n text = \" \".join(soup.text.split()) # Remove extra whitespaces.\n for phrase_regex in COMPILED_IGNORED_PHRASES:\n text = phrase_regex.sub(replace_with_safe_phrase, text)\n\n return text\n\n\ndef is_capitalized(safe_text: str) -> bool:\n sentences = SPLIT_BOUNDARY_REGEX.split(safe_text)\n return not any(DISALLOWED_REGEX.search(sentence.strip()) for sentence in sentences)\n\n\ndef check_banned_words(text: str) -> List[str]:\n lower_cased_text = text.lower()\n errors = []\n for word, reason in BANNED_WORDS.items():\n if word in lower_cased_text:\n # Hack: Should move this into BANNED_WORDS framework; for\n # now, just hand-code the skips:\n if \"realm_name\" in lower_cased_text:\n continue\n kwargs = dict(word=word, text=text, reason=reason)\n msg = \"{word} found in '{text}'. 
{reason}\".format(**kwargs)\n errors.append(msg)\n\n return errors\n\n\ndef check_capitalization(strings: List[str]) -> Tuple[List[str], List[str], List[str]]:\n errors = []\n ignored = []\n banned_word_errors = []\n for text in strings:\n text = \" \".join(text.split()) # Remove extra whitespaces.\n safe_text = get_safe_text(text)\n has_ignored_phrase = text != safe_text\n capitalized = is_capitalized(safe_text)\n if not capitalized:\n errors.append(text)\n elif has_ignored_phrase:\n ignored.append(text)\n\n banned_word_errors.extend(check_banned_words(text))\n\n return sorted(errors), sorted(ignored), sorted(banned_word_errors)\n",
"path": "tools/lib/capitalization.py"
}
] | [
{
"content": "import re\nfrom typing import List, Match, Tuple\n\nfrom bs4 import BeautifulSoup\n\n# The phrases in this list will be ignored. The longest phrase is\n# tried first; this removes the chance of smaller phrases changing\n# the text before longer phrases are tried.\n# The errors shown by `tools/check-capitalization` can be added to\n# this list without any modification.\nIGNORED_PHRASES = [\n # Proper nouns and acronyms\n r\"API\",\n r\"APNS\",\n r\"Botserver\",\n r\"Cookie Bot\",\n r\"DevAuthBackend\",\n r\"GCM\",\n r\"GitHub\",\n r\"Gravatar\",\n r\"Help Center\",\n r\"HTTP\",\n r\"ID\",\n r\"IDs\",\n r\"IP\",\n r\"JSON\",\n r\"Kerberos\",\n r\"LDAP\",\n r\"Markdown\",\n r\"OTP\",\n r\"Pivotal\",\n r\"PM\",\n r\"PMs\",\n r\"Slack\",\n r\"Google\",\n r\"Terms of Service\",\n r\"Tuesday\",\n r\"URL\",\n r\"UUID\",\n r\"Webathena\",\n r\"WordPress\",\n r\"Zephyr\",\n r\"Zoom\",\n r\"Zulip\",\n r\"Zulip Server\",\n r\"Zulip Account Security\",\n r\"Zulip Security\",\n r\"Zulip Cloud Standard\",\n r\"BigBlueButton\",\n # Code things\n r\"\\.zuliprc\",\n # BeautifulSoup will remove <z-user> which is horribly confusing,\n # so we need more of the sentence.\n r\"<z-user></z-user> will have the same role\",\n # Things using \"I\"\n r\"I understand\",\n r\"I'm\",\n r\"I've\",\n # Specific short words\n r\"beta\",\n r\"and\",\n r\"bot\",\n r\"e\\.g\\.\",\n r\"enabled\",\n r\"signups\",\n # Placeholders\n r\"keyword\",\n r\"streamname\",\n r\"user@example\\.com\",\n # Fragments of larger strings\n (r\"your subscriptions on your Streams page\"),\n r\"Add global time<br />Everyone sees global times in their own time zone\\.\",\n r\"user\",\n r\"an unknown operating system\",\n r\"Go to Settings\",\n # SPECIAL CASES\n # Because topics usually are lower-case, this would look weird if it were capitalized\n r\"more topics\",\n # Used alone in a parenthetical where capitalized looks worse.\n r\"^deprecated$\",\n # We want the similar text in the Private Messages section to have the same capitalization.\n r\"more conversations\",\n r\"back to streams\",\n # Capital 'i' looks weird in reminders popover\n r\"in 1 hour\",\n r\"in 20 minutes\",\n r\"in 3 hours\",\n # these are used as topics\n r\"^new streams$\",\n r\"^stream events$\",\n # These are used as example short names (e.g. an uncapitalized context):\n r\"^marketing$\",\n r\"^cookie$\",\n # Used to refer custom time limits\n r\"\\bN\\b\",\n # Capital c feels obtrusive in clear status option\n r\"clear\",\n r\"group private messages with \\{recipient\\}\",\n r\"private messages with \\{recipient\\}\",\n r\"private messages with yourself\",\n r\"GIF\",\n # Emoji name placeholder\n r\"leafy green vegetable\",\n # Subdomain placeholder\n r\"your-organization-url\",\n # Used in invite modal\n r\"or\",\n # Used in GIPHY popover.\n r\"GIFs\",\n r\"GIPHY\",\n # Used in our case studies\n r\"Technical University of Munich\",\n r\"University of California San Diego\",\n # Used in stream creation form\n r\"email hidden\",\n # Use in compose box.\n r\"to send\",\n r\"to add a new line\",\n # Used in showing Notification Bot read receipts message\n \"Notification Bot\",\n # Used in presence_enabled setting label\n r\"invisible mode off\",\n # Typeahead suggestions for \"Pronouns\" custom field type.\n r\"he/him\",\n r\"she/her\",\n r\"they/them\",\n]\n\n# Sort regexes in descending order of their lengths. 
As a result, the\n# longer phrases will be ignored first.\nIGNORED_PHRASES.sort(key=lambda regex: len(regex), reverse=True)\n\n# Compile regexes to improve performance. This also extracts the\n# text using BeautifulSoup and then removes extra whitespaces from\n# it. This step enables us to add HTML in our regexes directly.\nCOMPILED_IGNORED_PHRASES = [\n re.compile(\" \".join(BeautifulSoup(regex, \"lxml\").text.split())) for regex in IGNORED_PHRASES\n]\n\nSPLIT_BOUNDARY = \"?.!\" # Used to split string into sentences.\nSPLIT_BOUNDARY_REGEX = re.compile(rf\"[{SPLIT_BOUNDARY}]\")\n\n# Regexes which check capitalization in sentences.\nDISALLOWED = [\n r\"^[a-z](?!\\})\", # Checks if the sentence starts with a lower case character.\n r\"^[A-Z][a-z]+[\\sa-z0-9]+[A-Z]\", # Checks if an upper case character exists\n # after a lower case character when the first character is in upper case.\n]\nDISALLOWED_REGEX = re.compile(r\"|\".join(DISALLOWED))\n\nBANNED_WORDS = {\n \"realm\": \"The term realm should not appear in user-facing strings. Use organization instead.\",\n}\n\n\ndef get_safe_phrase(phrase: str) -> str:\n \"\"\"\n Safe phrase is in lower case and doesn't contain characters which can\n conflict with split boundaries. All conflicting characters are replaced\n with low dash (_).\n \"\"\"\n phrase = SPLIT_BOUNDARY_REGEX.sub(\"_\", phrase)\n return phrase.lower()\n\n\ndef replace_with_safe_phrase(matchobj: Match[str]) -> str:\n \"\"\"\n The idea is to convert IGNORED_PHRASES into safe phrases, see\n `get_safe_phrase()` function. The only exception is when the\n IGNORED_PHRASE is at the start of the text or after a split\n boundary; in this case, we change the first letter of the phrase\n to upper case.\n \"\"\"\n ignored_phrase = matchobj.group(0)\n safe_string = get_safe_phrase(ignored_phrase)\n\n start_index = matchobj.start()\n complete_string = matchobj.string\n\n is_string_start = start_index == 0\n # We expect that there will be one space between split boundary\n # and the next word.\n punctuation = complete_string[max(start_index - 2, 0)]\n is_after_split_boundary = punctuation in SPLIT_BOUNDARY\n if is_string_start or is_after_split_boundary:\n return safe_string.capitalize()\n\n return safe_string\n\n\ndef get_safe_text(text: str) -> str:\n \"\"\"\n This returns text which is rendered by BeautifulSoup and is in the\n form that can be split easily and has all IGNORED_PHRASES processed.\n \"\"\"\n soup = BeautifulSoup(text, \"lxml\")\n text = \" \".join(soup.text.split()) # Remove extra whitespaces.\n for phrase_regex in COMPILED_IGNORED_PHRASES:\n text = phrase_regex.sub(replace_with_safe_phrase, text)\n\n return text\n\n\ndef is_capitalized(safe_text: str) -> bool:\n sentences = SPLIT_BOUNDARY_REGEX.split(safe_text)\n return not any(DISALLOWED_REGEX.search(sentence.strip()) for sentence in sentences)\n\n\ndef check_banned_words(text: str) -> List[str]:\n lower_cased_text = text.lower()\n errors = []\n for word, reason in BANNED_WORDS.items():\n if word in lower_cased_text:\n # Hack: Should move this into BANNED_WORDS framework; for\n # now, just hand-code the skips:\n if \"realm_name\" in lower_cased_text:\n continue\n kwargs = dict(word=word, text=text, reason=reason)\n msg = \"{word} found in '{text}'. 
{reason}\".format(**kwargs)\n errors.append(msg)\n\n return errors\n\n\ndef check_capitalization(strings: List[str]) -> Tuple[List[str], List[str], List[str]]:\n errors = []\n ignored = []\n banned_word_errors = []\n for text in strings:\n text = \" \".join(text.split()) # Remove extra whitespaces.\n safe_text = get_safe_text(text)\n has_ignored_phrase = text != safe_text\n capitalized = is_capitalized(safe_text)\n if not capitalized:\n errors.append(text)\n elif has_ignored_phrase:\n ignored.append(text)\n\n banned_word_errors.extend(check_banned_words(text))\n\n return sorted(errors), sorted(ignored), sorted(banned_word_errors)\n",
"path": "tools/lib/capitalization.py"
}
] | diff --git a/frontend_tests/node_tests/dispatch.js b/frontend_tests/node_tests/dispatch.js
index d35f691150ea5..d6161ebf463b3 100644
--- a/frontend_tests/node_tests/dispatch.js
+++ b/frontend_tests/node_tests/dispatch.js
@@ -40,6 +40,7 @@ const message_lists = mock_esm("../../static/js/message_lists");
const muted_topics_ui = mock_esm("../../static/js/muted_topics_ui");
const muted_users_ui = mock_esm("../../static/js/muted_users_ui");
const notifications = mock_esm("../../static/js/notifications");
+const pm_list = mock_esm("../../static/js/pm_list");
const reactions = mock_esm("../../static/js/reactions");
const realm_icon = mock_esm("../../static/js/realm_icon");
const realm_logo = mock_esm("../../static/js/realm_logo");
@@ -1020,6 +1021,7 @@ run_test("user_status", ({override}) => {
{
const stub = make_stub();
override(activity, "redraw_user", stub.f);
+ override(pm_list, "update_private_messages", noop);
dispatch(event);
assert.equal(stub.num_calls, 1);
const args = stub.get_args("user_id");
diff --git a/frontend_tests/node_tests/pm_list.js b/frontend_tests/node_tests/pm_list.js
index daba91ef7df55..68aba53f7badd 100644
--- a/frontend_tests/node_tests/pm_list.js
+++ b/frontend_tests/node_tests/pm_list.js
@@ -17,14 +17,16 @@ run_test("update_dom_with_unread_counts", () => {
assert.equal(narrow_state.active(), true);
const $total_count = $.create("total-count-stub");
- const $private_li = $(".top_left_private_messages .private_messages_header");
+ const $private_li = $(
+ ".private_messages_container #private_messages_section #private_messages_section_header",
+ );
$private_li.set_find_results(".unread_count", $total_count);
counts = {
private_message_count: 10,
};
- pm_list.update_dom_with_unread_counts(counts);
+ pm_list.set_count(counts.private_message_count);
assert.equal($total_count.text(), "10");
assert.ok($total_count.visible());
@@ -32,7 +34,7 @@ run_test("update_dom_with_unread_counts", () => {
private_message_count: 0,
};
- pm_list.update_dom_with_unread_counts(counts);
+ pm_list.set_count(counts.private_message_count);
assert.equal($total_count.text(), "");
assert.ok(!$total_count.visible());
});
diff --git a/frontend_tests/node_tests/recent_topics.js b/frontend_tests/node_tests/recent_topics.js
index c04a649998ac4..5b490380bf784 100644
--- a/frontend_tests/node_tests/recent_topics.js
+++ b/frontend_tests/node_tests/recent_topics.js
@@ -107,6 +107,10 @@ const narrow = mock_esm("../../static/js/narrow", {
handle_middle_pane_transition: noop,
has_shown_message_list_view: true,
});
+mock_esm("../../static/js/pm_list", {
+ update_private_messages: noop,
+ handle_narrow_deactivated: noop,
+});
mock_esm("../../static/js/popovers", {
any_active: () => false,
});
diff --git a/frontend_tests/node_tests/stream_list.js b/frontend_tests/node_tests/stream_list.js
index 905d4e4142605..3c81b39a14e63 100644
--- a/frontend_tests/node_tests/stream_list.js
+++ b/frontend_tests/node_tests/stream_list.js
@@ -409,6 +409,7 @@ test_ui("narrowing", ({mock_template}) => {
topic_list.rebuild = noop;
topic_list.active_stream_id = noop;
topic_list.get_stream_li = noop;
+ $("#streams_header").outerHeight = () => 0;
assert.ok(!$("<devel-sidebar-row-stub>").hasClass("active-filter"));
@@ -700,6 +701,7 @@ test_ui("refresh_pin", ({override, override_rewire, mock_template}) => {
override_rewire(stream_list, "update_count_in_dom", noop);
$("#stream_filters").append = noop;
+ $("#streams_header").outerHeight = () => 0;
let scrolled;
override(scroll_util, "scroll_element_into_container", ($li) => {
diff --git a/frontend_tests/node_tests/vdom.js b/frontend_tests/node_tests/vdom.js
index 83f7dda6a3347..3a6aeec4d843c 100644
--- a/frontend_tests/node_tests/vdom.js
+++ b/frontend_tests/node_tests/vdom.js
@@ -26,7 +26,7 @@ run_test("basics", () => {
run_test("attribute escaping", () => {
// So far most of the time our attributes are
- // hard-coded classes like "expanded_private_messages",
+ // hard-coded classes like "pm-list",
// but we need to be defensive about future code
// that might use data from possibly malicious users.
const opts = {
diff --git a/frontend_tests/puppeteer_tests/compose.ts b/frontend_tests/puppeteer_tests/compose.ts
index c9790e9e8abbb..00e6a6d3b530a 100644
--- a/frontend_tests/puppeteer_tests/compose.ts
+++ b/frontend_tests/puppeteer_tests/compose.ts
@@ -113,7 +113,7 @@ async function test_narrow_to_private_messages_with_cordelia(page: Page): Promis
you_and_cordelia_selector,
);
const cordelia_user_id = await common.get_user_id_from_name(page, "Cordelia, Lear's daughter");
- const pm_list_selector = `li[data-user-ids-string="${cordelia_user_id}"].expanded_private_message.active-sub-filter`;
+ const pm_list_selector = `li[data-user-ids-string="${cordelia_user_id}"].pm-list-item.active-sub-filter`;
await page.waitForSelector(pm_list_selector, {visible: true});
await close_compose_box(page);
diff --git a/frontend_tests/puppeteer_tests/message-basics.ts b/frontend_tests/puppeteer_tests/message-basics.ts
index 7066190d6e2b3..685002cd7df4b 100644
--- a/frontend_tests/puppeteer_tests/message-basics.ts
+++ b/frontend_tests/puppeteer_tests/message-basics.ts
@@ -280,7 +280,9 @@ async function test_narrow_by_clicking_the_left_sidebar(page: Page): Promise<voi
await page.click(".top_left_all_messages a");
await expect_home(page);
- await page.click(".top_left_private_messages a");
+ const all_private_messages_icon = "#show_all_private_messages";
+ await page.waitForSelector(all_private_messages_icon, {visible: true});
+ await page.click(all_private_messages_icon);
await expect_all_pm(page);
await un_narrow(page);
diff --git a/frontend_tests/puppeteer_tests/navigation.ts b/frontend_tests/puppeteer_tests/navigation.ts
index eb12cbb8d231b..a80a4304b395c 100644
--- a/frontend_tests/puppeteer_tests/navigation.ts
+++ b/frontend_tests/puppeteer_tests/navigation.ts
@@ -64,6 +64,16 @@ async function navigate_to_subscriptions(page: Page): Promise<void> {
await page.waitForSelector("#subscription_overlay", {hidden: true});
}
+async function navigate_to_private_messages(page: Page): Promise<void> {
+ console.log("Navigate to private messages");
+
+ const all_private_messages_icon = "#show_all_private_messages";
+ await page.waitForSelector(all_private_messages_icon, {visible: true});
+ await page.click(all_private_messages_icon);
+
+ await page.waitForSelector("#message_view_header .fa-envelope", {visible: true});
+}
+
async function test_reload_hash(page: Page): Promise<void> {
const initial_page_load_time = await page.evaluate(
(): number => zulip_test.page_params.page_load_time,
@@ -99,7 +109,7 @@ async function navigation_tests(page: Page): Promise<void> {
await navigate_to_subscriptions(page);
await navigate_using_left_sidebar(page, "all_messages", "message_feed_container");
await navigate_to_settings(page);
- await navigate_using_left_sidebar(page, "narrow/is/private", "message_feed_container");
+ await navigate_to_private_messages(page);
await navigate_to_subscriptions(page);
await navigate_using_left_sidebar(page, verona_narrow, "message_feed_container");
diff --git a/static/js/click_handlers.js b/static/js/click_handlers.js
index 3cb75c7be23ad..0e109e9f14350 100644
--- a/static/js/click_handlers.js
+++ b/static/js/click_handlers.js
@@ -32,6 +32,7 @@ import * as notifications from "./notifications";
import * as overlays from "./overlays";
import {page_params} from "./page_params";
import * as people from "./people";
+import * as pm_list from "./pm_list";
import * as popovers from "./popovers";
import * as reactions from "./reactions";
import * as recent_topics_ui from "./recent_topics_ui";
@@ -759,6 +760,47 @@ export function initialize() {
stream_list.toggle_filter_displayed(e);
});
+ $("body").on(
+ "click",
+ ".private_messages_container.zoom-out #private_messages_section_header",
+ (e) => {
+ if (e.target.classList.value === "fa fa-align-right") {
+ // Let the browser handle the "all private messages" widget.
+ return;
+ }
+
+ e.preventDefault();
+ e.stopPropagation();
+ const $left_sidebar_scrollbar = $(
+ "#left_sidebar_scroll_container .simplebar-content-wrapper",
+ );
+ const scroll_position = $left_sidebar_scrollbar.scrollTop();
+
+ // This next bit of logic is a bit subtle; this header
+ // button scrolls to the top of the private messages
+ // section if it is uncollapsed but out of view; otherwise, we
+ // toggle its collapsed state.
+ if (scroll_position === 0 || pm_list.is_private_messages_collapsed()) {
+ pm_list.toggle_private_messages_section();
+ }
+ $left_sidebar_scrollbar.scrollTop(0);
+ },
+ );
+
+ /* The PRIVATE MESSAGES label's click behavior is complicated;
+ * only when zoomed in does it have a navigation effect, so we need
+ * this click handler rather than just a link. */
+ $("body").on(
+ "click",
+ ".private_messages_container.zoom-in #private_messages_section_header",
+ (e) => {
+ e.preventDefault();
+ e.stopPropagation();
+
+ window.location.hash = "narrow/is/private";
+ },
+ );
+
// WEBATHENA
$("body").on("click", ".webathena_login", (e) => {
diff --git a/static/js/pm_list.js b/static/js/pm_list.js
index a8d28e77e120a..d99900ac474fa 100644
--- a/static/js/pm_list.js
+++ b/static/js/pm_list.js
@@ -1,107 +1,192 @@
import $ from "jquery";
+import _ from "lodash";
-import * as narrow_state from "./narrow_state";
-import * as people from "./people";
import * as pm_list_data from "./pm_list_data";
import * as pm_list_dom from "./pm_list_dom";
+import * as resize from "./resize";
import * as ui from "./ui";
import * as ui_util from "./ui_util";
import * as vdom from "./vdom";
let prior_dom;
-let private_messages_open = false;
// This module manages the "Private messages" section in the upper
// left corner of the app. This was split out from stream_list.js.
-function get_filter_li() {
- return $(".top_left_private_messages .private_messages_header");
-}
+let private_messages_collapsed = false;
+
+// The private messages section can be zoomed in to view more messages.
+// This keeps track of if we're zoomed in or not.
+let zoomed = false;
-function set_count(count) {
- ui_util.update_unread_count_in_dom(get_filter_li(), count);
+function get_private_messages_section_header() {
+ return $(
+ ".private_messages_container #private_messages_section #private_messages_section_header",
+ );
}
-function remove_expanded_private_messages() {
- ui.get_content_element($("#private-container")).empty();
+export function set_count(count) {
+ ui_util.update_unread_count_in_dom(get_private_messages_section_header(), count);
}
-export function close() {
- private_messages_open = false;
- prior_dom = undefined;
- remove_expanded_private_messages();
+function close() {
+ private_messages_collapsed = true;
+ $("#toggle_private_messages_section_icon").removeClass("fa-caret-down");
+ $("#toggle_private_messages_section_icon").addClass("fa-caret-right");
+
+ update_private_messages();
}
export function _build_private_messages_list() {
const conversations = pm_list_data.get_conversations();
- const dom_ast = pm_list_dom.pm_ul(conversations);
+ const pm_list_info = pm_list_data.get_list_info(zoomed);
+ const conversations_to_be_shown = pm_list_info.conversations_to_be_shown;
+ const more_conversations_unread_count = pm_list_info.more_conversations_unread_count;
+
+ const pm_list_nodes = conversations_to_be_shown.map((conversation) =>
+ pm_list_dom.keyed_pm_li(conversation),
+ );
+
+ const all_conversations_shown = conversations_to_be_shown.length === conversations.length;
+ if (!all_conversations_shown) {
+ pm_list_nodes.push(
+ pm_list_dom.more_private_conversations_li(more_conversations_unread_count),
+ );
+ }
+ const dom_ast = pm_list_dom.pm_ul(pm_list_nodes);
return dom_ast;
}
-export function update_private_messages() {
- if (!narrow_state.active()) {
- return;
+function set_dom_to(new_dom) {
+ const $container = ui.get_content_element($("#private_messages_list"));
+
+ function replace_content(html) {
+ $container.html(html);
}
- if (private_messages_open) {
- const $container = ui.get_content_element($("#private-container"));
- const new_dom = _build_private_messages_list();
+ function find() {
+ return $container.find("ul");
+ }
- function replace_content(html) {
- $container.html(html);
- }
+ vdom.update(replace_content, find, new_dom, prior_dom);
+ prior_dom = new_dom;
+}
- function find() {
- return $container.find("ul");
+export function update_private_messages() {
+ if (private_messages_collapsed) {
+ // In the collapsed state, we will still display the current
+ // conversation, to preserve the UI invariant that there's
+ // always something highlighted in the left sidebar.
+ const conversations = pm_list_data.get_conversations();
+ const active_conversation = conversations.find((conversation) => conversation.is_active);
+
+ if (active_conversation) {
+ const node = [pm_list_dom.keyed_pm_li(active_conversation)];
+ const new_dom = pm_list_dom.pm_ul(node);
+ set_dom_to(new_dom);
+ } else {
+ // Otherwise, empty the section.
+ $(".pm-list").empty();
+ prior_dom = undefined;
}
-
- vdom.update(replace_content, find, new_dom, prior_dom);
- prior_dom = new_dom;
+ } else {
+ const new_dom = _build_private_messages_list();
+ set_dom_to(new_dom);
}
+ // Make sure to update the left sidebar heights after updating PMs.
+ setTimeout(resize.resize_stream_filters_container, 0);
}
export function expand() {
- private_messages_open = true;
+ private_messages_collapsed = false;
+ $("#toggle_private_messages_section_icon").addClass("fa-caret-down");
+ $("#toggle_private_messages_section_icon").removeClass("fa-caret-right");
update_private_messages();
- if (pm_list_data.is_all_privates()) {
- $(".top_left_private_messages").addClass("active-filter");
- }
}
export function update_dom_with_unread_counts(counts) {
+ // In theory, we could support passing the counts object through
+ // to pm_list_data, rather than fetching it directly there. But
+ // it's not an important optimization, because it's unlikely a
+ // user would have 10,000s of unread PMs where it could matter.
update_private_messages();
+ // This is just the global unread count.
set_count(counts.private_message_count);
}
-function should_expand_pm_list(filter) {
- const op_is = filter.operands("is");
-
- if (op_is.length >= 1 && op_is.includes("private")) {
- return true;
- }
+export function highlight_all_private_messages_view() {
+ $(".private_messages_container").addClass("active_private_messages_section");
+}
- const op_pm = filter.operands("pm-with");
+function unhighlight_all_private_messages_view() {
+ $(".private_messages_container").removeClass("active_private_messages_section");
+}
- if (op_pm.length !== 1) {
- return false;
+export function handle_narrow_activated(filter) {
+ const active_filter = filter;
+ const is_all_private_message_view = _.isEqual(active_filter.sorted_term_types(), [
+ "is-private",
+ ]);
+ const narrow_to_private_messages_section = active_filter.operands("pm-with").length !== 0;
+
+ if (is_all_private_message_view) {
+ highlight_all_private_messages_view();
+ } else {
+ unhighlight_all_private_messages_view();
}
+ if (narrow_to_private_messages_section) {
+ update_private_messages();
+ }
+}
- const emails_strings = op_pm[0];
- const emails = emails_strings.split(",");
-
- const has_valid_emails = people.is_valid_bulk_emails_for_compose(emails);
+export function handle_narrow_deactivated() {
+ // Since one can renarrow via the keyboard shortcut or similar, we
+ // avoid disturbing the zoomed state here.
+ unhighlight_all_private_messages_view();
+ update_private_messages();
+}
- return has_valid_emails;
+export function is_private_messages_collapsed() {
+ return private_messages_collapsed;
}
-export function handle_narrow_activated(filter) {
- if (should_expand_pm_list(filter)) {
+export function toggle_private_messages_section() {
+ // change the state of PM section depending on the previous state.
+ if (private_messages_collapsed) {
expand();
} else {
close();
}
}
-export function handle_narrow_deactivated() {
- close();
+function zoom_in() {
+ zoomed = true;
+ update_private_messages();
+ $(".private_messages_container").removeClass("zoom-out").addClass("zoom-in");
+ $("#streams_list").hide();
+ $(".left-sidebar .right-sidebar-items").hide();
+}
+
+function zoom_out() {
+ zoomed = false;
+ update_private_messages();
+ $(".private_messages_container").removeClass("zoom-in").addClass("zoom-out");
+ $("#streams_list").show();
+ $(".left-sidebar .right-sidebar-items").show();
+}
+
+export function initialize() {
+ $(".private_messages_container").on("click", "#show_more_private_messages", (e) => {
+ e.stopPropagation();
+ e.preventDefault();
+
+ zoom_in();
+ });
+
+ $(".private_messages_container").on("click", "#hide_more_private_messages", (e) => {
+ e.stopPropagation();
+ e.preventDefault();
+
+ zoom_out();
+ });
}
diff --git a/static/js/pm_list_dom.js b/static/js/pm_list_dom.js
index e3617237bb217..f633b74f15053 100644
--- a/static/js/pm_list_dom.js
+++ b/static/js/pm_list_dom.js
@@ -1,5 +1,6 @@
import _ from "lodash";
+import render_more_private_conversations from "../templates/more_pms.hbs";
import render_pm_list_item from "../templates/pm_list_item.hbs";
import * as vdom from "./vdom";
@@ -19,13 +20,34 @@ export function keyed_pm_li(conversation) {
};
}
-export function pm_ul(conversations) {
+export function more_private_conversations_li(more_conversations_unread_count) {
+ const render = () => render_more_private_conversations({more_conversations_unread_count});
+
+ // Used in vdom.js to check if an element has changed and needs to
+ // be updated in the DOM.
+ const eq = (other) =>
+ other.more_items &&
+ more_conversations_unread_count === other.more_conversations_unread_count;
+
+ // This special key must be impossible as a user_ids_string.
+ const key = "more_private_conversations";
+
+ return {
+ key,
+ more_items: true,
+ more_conversations_unread_count,
+ render,
+ eq,
+ };
+}
+
+export function pm_ul(nodes) {
const attrs = [
- ["class", "expanded_private_messages"],
+ ["class", "pm-list"],
["data-name", "private"],
];
return vdom.ul({
attrs,
- keyed_nodes: conversations.map((conversation) => keyed_pm_li(conversation)),
+ keyed_nodes: nodes,
});
}
diff --git a/static/js/recent_topics_ui.js b/static/js/recent_topics_ui.js
index 3683b87503e51..9356cf63279bc 100644
--- a/static/js/recent_topics_ui.js
+++ b/static/js/recent_topics_ui.js
@@ -20,6 +20,7 @@ import * as narrow_state from "./narrow_state";
import * as navigate from "./navigate";
import {page_params} from "./page_params";
import * as people from "./people";
+import * as pm_list from "./pm_list";
import * as recent_senders from "./recent_senders";
import {get, process_message, topics} from "./recent_topics_data";
import {
@@ -767,6 +768,7 @@ export function show() {
narrow.set_narrow_title(recent_topics_title);
message_view_header.render_title_area();
narrow.handle_middle_pane_transition();
+ pm_list.handle_narrow_deactivated();
complete_rerender();
}
diff --git a/static/js/resize.js b/static/js/resize.js
index aaaa7823b5676..52dd3fa67eb19 100644
--- a/static/js/resize.js
+++ b/static/js/resize.js
@@ -56,7 +56,7 @@ function get_new_heights() {
Number.parseInt($(".narrows_panel").css("marginTop"), 10) -
Number.parseInt($(".narrows_panel").css("marginBottom"), 10) -
$("#global_filters").safeOuterHeight(true) -
- $("#streams_header").safeOuterHeight(true);
+ $("#private_messages_sticky_header").safeOuterHeight(true);
// Don't let us crush the stream sidebar completely out of view
res.stream_filters_max_height = Math.max(80, res.stream_filters_max_height);
@@ -100,10 +100,9 @@ function left_userlist_get_new_heights() {
Number.parseInt($(".narrows_panel").css("marginTop"), 10) -
Number.parseInt($(".narrows_panel").css("marginBottom"), 10) -
$("#global_filters").safeOuterHeight(true) -
- $("#streams_header").safeOuterHeight(true) -
$("#userlist-header").safeOuterHeight(true) -
$("#user_search_section").safeOuterHeight(true) -
- Number.parseInt($stream_filters.css("marginBottom"), 10);
+ $("#private_messages_sticky_header").safeOuterHeight(true);
const blocks = [
{
@@ -211,7 +210,7 @@ export function resize_bottom_whitespace(h) {
export function resize_stream_filters_container(h) {
h = narrow_window ? left_userlist_get_new_heights() : get_new_heights();
resize_bottom_whitespace(h);
- $("#stream-filters-container").css("max-height", h.stream_filters_max_height);
+ $("#left_sidebar_scroll_container").css("max-height", h.stream_filters_max_height);
}
export function resize_sidebars() {
@@ -248,7 +247,7 @@ export function resize_sidebars() {
const h = narrow_window ? left_userlist_get_new_heights() : get_new_heights();
$("#buddy_list_wrapper").css("max-height", h.buddy_list_wrapper_max_height);
- $("#stream-filters-container").css("max-height", h.stream_filters_max_height);
+ $("#left_sidebar_scroll_container").css("max-height", h.stream_filters_max_height);
return h;
}
diff --git a/static/js/scroll_util.js b/static/js/scroll_util.js
index f81ae30a2201e..7275068121149 100644
--- a/static/js/scroll_util.js
+++ b/static/js/scroll_util.js
@@ -20,20 +20,21 @@ export function scroll_delta(opts) {
return delta;
}
-export function scroll_element_into_container($elem, $container) {
+export function scroll_element_into_container($elem, $container, sticky_header_height = 0) {
// This does the minimum amount of scrolling that is needed to make
// the element visible. It doesn't try to center the element, so
// this will be non-intrusive to users when they already have
// the element visible.
$container = ui.get_scroll_element($container);
- const elem_top = $elem.position().top;
+ const elem_top = $elem.position().top - sticky_header_height;
const elem_bottom = elem_top + $elem.innerHeight();
+ const container_height = $container.height() - sticky_header_height;
const opts = {
elem_top,
elem_bottom,
- container_height: $container.height(),
+ container_height,
};
const delta = scroll_delta(opts);
diff --git a/static/js/stream_list.js b/static/js/stream_list.js
index 936211ff95246..cbf55cd303289 100644
--- a/static/js/stream_list.js
+++ b/static/js/stream_list.js
@@ -14,6 +14,7 @@ import * as keydown_util from "./keydown_util";
import {ListCursor} from "./list_cursor";
import * as narrow from "./narrow";
import * as narrow_state from "./narrow_state";
+import * as pm_list from "./pm_list";
import * as popovers from "./popovers";
import * as resize from "./resize";
import * as scroll_util from "./scroll_util";
@@ -268,9 +269,16 @@ export function zoom_in_topics(options) {
$elt.hide();
}
});
+
+ // we also need to hide the PM section and allow
+ // the stream list to take the complete left sidebar in the zoomed-in view.
+ $(".private_messages_container").hide();
}
export function zoom_out_topics() {
+ // Show PM section
+ $(".private_messages_container").show();
+
// Show stream list titles and pinned stream splitter
$(".stream-filters-label").each(function () {
$(this).show();
@@ -599,16 +607,33 @@ export function set_event_handlers() {
toggle_filter_displayed(e);
});
+ function toggle_pm_header_icon() {
+ if (pm_list.is_private_messages_collapsed()) {
+ return;
+ }
+
+ const scroll_position = $(
+ "#left_sidebar_scroll_container .simplebar-content-wrapper",
+ ).scrollTop();
+ const pm_list_height = $("#private_messages_list").height();
+ if (scroll_position > pm_list_height) {
+ $("#toggle_private_messages_section_icon").addClass("fa-caret-right");
+ $("#toggle_private_messages_section_icon").removeClass("fa-caret-down");
+ } else {
+ $("#toggle_private_messages_section_icon").addClass("fa-caret-down");
+ $("#toggle_private_messages_section_icon").removeClass("fa-caret-right");
+ }
+ }
+
// check for user scrolls on streams list for first time
- ui.get_scroll_element($("#stream-filters-container")).on("scroll", function () {
+ ui.get_scroll_element($("#left_sidebar_scroll_container")).on("scroll", () => {
has_scrolled = true;
- // remove listener once user has scrolled
- $(this).off("scroll");
+ toggle_pm_header_icon();
});
stream_cursor = new ListCursor({
list: {
- scroll_container_sel: "#stream-filters-container",
+ scroll_container_sel: "#left_sidebar_scroll_container",
find_li(opts) {
const stream_id = opts.key;
const li = get_stream_li(stream_id);
@@ -722,14 +747,14 @@ export function toggle_filter_displayed(e) {
}
export function scroll_stream_into_view($stream_li) {
- const $container = $("#stream-filters-container");
+ const $container = $("#left_sidebar_scroll_container");
if ($stream_li.length !== 1) {
blueslip.error("Invalid stream_li was passed in");
return;
}
-
- scroll_util.scroll_element_into_container($stream_li, $container);
+ const stream_header_height = $("#streams_header").outerHeight();
+ scroll_util.scroll_element_into_container($stream_li, $container, stream_header_height);
}
export function maybe_scroll_narrow_into_view() {
diff --git a/static/js/tippyjs.js b/static/js/tippyjs.js
index 3fb9928f7fcb9..c30fbae13e158 100644
--- a/static/js/tippyjs.js
+++ b/static/js/tippyjs.js
@@ -222,7 +222,8 @@ export function initialize() {
delegate("body", {
target: [
".recipient_bar_icon",
- ".sidebar-title",
+ "#streams_header .sidebar-title",
+ "#userlist-title",
"#user_filter_icon",
"#scroll-to-bottom-button-clickable-area",
".code_external_link",
@@ -396,4 +397,39 @@ export function initialize() {
}
},
});
+
+ delegate("body", {
+ target: "#pm_tooltip_container",
+ onShow(instance) {
+ if ($(".private_messages_container").hasClass("zoom-in")) {
+ return false;
+ }
+
+ if ($("#toggle_private_messages_section_icon").hasClass("fa-caret-down")) {
+ instance.setContent(
+ $t({
+ defaultMessage: "Collapse private messages",
+ }),
+ );
+ } else {
+ instance.setContent($t({defaultMessage: "Expand private messages"}));
+ }
+ return true;
+ },
+ delay: [500, 20],
+ appendTo: () => document.body,
+ });
+
+ delegate("body", {
+ target: "#show_all_private_messages",
+ placement: "bottom",
+ onShow(instance) {
+ instance.setContent(
+ $t({
+ defaultMessage: "All private messages (P)",
+ }),
+ );
+ },
+ appendTo: () => document.body,
+ });
}
diff --git a/static/js/top_left_corner.js b/static/js/top_left_corner.js
index fa4731ff49b47..87fb8eacbccd7 100644
--- a/static/js/top_left_corner.js
+++ b/static/js/top_left_corner.js
@@ -1,6 +1,5 @@
import $ from "jquery";
-import * as pm_list from "./pm_list";
import * as resize from "./resize";
import * as ui_util from "./ui_util";
import * as unread_ui from "./unread_ui";
@@ -27,9 +26,8 @@ function remove($elem) {
$elem.removeClass("active-filter active-sub-filter");
}
-function deselect_top_left_corner_items() {
+export function deselect_top_left_corner_items() {
remove($(".top_left_all_messages"));
- remove($(".top_left_private_messages"));
remove($(".top_left_starred_messages"));
remove($(".top_left_mentions"));
remove($(".top_left_recent_topics"));
@@ -73,11 +71,9 @@ export function handle_narrow_deactivated() {
export function narrow_to_recent_topics() {
remove($(".top_left_all_messages"));
- remove($(".top_left_private_messages"));
remove($(".top_left_starred_messages"));
remove($(".top_left_mentions"));
$(".top_left_recent_topics").addClass("active-filter");
- pm_list.close();
setTimeout(() => {
resize.resize_stream_filters_container();
}, 0);
diff --git a/static/js/topic_list.js b/static/js/topic_list.js
index 3701c6a4619b4..9e5734bf2155c 100644
--- a/static/js/topic_list.js
+++ b/static/js/topic_list.js
@@ -331,7 +331,7 @@ export function zoom_in() {
active_widget.build();
}
- ui.get_scroll_element($("#stream-filters-container")).scrollTop(0);
+ ui.get_scroll_element($("#left_sidebar_scroll_container")).scrollTop(0);
const spinner = true;
active_widget.build(spinner);
diff --git a/static/js/ui_init.js b/static/js/ui_init.js
index f87a098f18c29..14573ba6482c6 100644
--- a/static/js/ui_init.js
+++ b/static/js/ui_init.js
@@ -59,6 +59,7 @@ import * as overlays from "./overlays";
import {page_params} from "./page_params";
import * as people from "./people";
import * as pm_conversations from "./pm_conversations";
+import * as pm_list from "./pm_list";
import * as popover_menus from "./popover_menus";
import * as presence from "./presence";
import * as realm_logo from "./realm_logo";
@@ -677,6 +678,7 @@ export function initialize_everything() {
unread_ui.initialize();
activity.initialize();
emoji_picker.initialize();
+ pm_list.initialize();
topic_list.initialize();
topic_zoom.initialize();
drafts.initialize();
diff --git a/static/styles/dark_theme.css b/static/styles/dark_theme.css
index 011b43849e037..12e69c3a5539a 100644
--- a/static/styles/dark_theme.css
+++ b/static/styles/dark_theme.css
@@ -144,7 +144,9 @@ body.dark-theme {
.column-right .right-sidebar,
#groups_overlay .right,
#subscription_overlay .right,
- #settings_page .right {
+ #settings_page .right,
+ #streams_header,
+ .private_messages_container {
background-color: hsl(212, 28%, 18%);
}
@@ -225,6 +227,14 @@ body.dark-theme {
background-color: hsl(208, 17%, 29%);
}
+ .active_private_messages_section {
+ #private_messages_section,
+ #private_messages_list,
+ #hide_more_private_messages {
+ background-color: hsla(199, 33%, 46%, 0.2);
+ }
+ }
+
/* do not turn the .message_header .stream_label text dark on hover because they're
on a dark background, and don't change the dark labels dark either. */
.message_header:not(.dark_background)
diff --git a/static/styles/left_sidebar.css b/static/styles/left_sidebar.css
index f82e8aa7eb267..4dc33d31b8e58 100644
--- a/static/styles/left_sidebar.css
+++ b/static/styles/left_sidebar.css
@@ -8,6 +8,9 @@ $left_col_size: 19px;
the above (and another 5px of padding not measured here) */
$topic_indent: calc($far_left_gutter_size + $left_col_size + 4px);
$topic_resolve_width: 13px;
+/* Space between section in the left sidebar. */
+$sections_vertical_gutter: 8px;
+$bottom_scrolling_buffer: 25px;
#left-sidebar {
#user-list {
@@ -50,11 +53,6 @@ $topic_resolve_width: 13px;
}
}
-.pm_left_col {
- min-width: $left_col_size;
- margin-left: 15px;
-}
-
#stream_filters,
#global_filters {
margin-right: 12px;
@@ -72,7 +70,7 @@ li.show-more-topics {
float: right;
opacity: 0.5;
padding: 3px;
- margin-left: 7px;
+ margin-left: 4px;
&:hover {
opacity: 1;
@@ -89,7 +87,7 @@ li.show-more-topics {
}
#streams_inline_icon {
- margin-right: 10px;
+ margin-right: 8px;
}
.tooltip {
@@ -106,6 +104,10 @@ li.show-more-topics {
li {
a {
padding: 1px 0;
+
+ &:hover {
+ text-decoration: none;
+ }
}
ul {
@@ -190,8 +192,8 @@ li.show-more-topics {
}
}
-#private-container,
-#stream-filters-container {
+#left_sidebar_scroll_container {
+ outline: none;
overflow-x: hidden;
overflow-y: auto;
position: relative;
@@ -199,16 +201,111 @@ li.show-more-topics {
width: 100%;
}
-#stream-filters-container .simplebar-content-wrapper {
- outline: none;
+.private_messages_container {
+ background: hsl(0, 0%, 100%);
+ margin-right: 16px;
+ margin-left: 6px;
+ z-index: 1;
+
+ #toggle_private_messages_section_icon {
+ opacity: 0.7;
+ margin-left: -15px;
+ min-width: 12px;
+
+ &.fa-caret-right {
+ position: relative;
+ left: 3px;
+ }
+
+ &:hover {
+ opacity: 1;
+ }
+ }
+
+ #private_messages_section_header {
+ cursor: pointer;
+ padding: 0 10px 1px 4px;
+ white-space: nowrap;
+
+ #show_all_private_messages {
+ right: 0;
+ float: right;
+ position: absolute;
+ opacity: 0.7;
+ text-decoration: none;
+ color: inherit;
+ margin-right: 21px;
+ margin-top: 1px;
+
+ &:hover {
+ opacity: 1;
+ }
+ }
+
+ .unread_count {
+ margin-right: 16px;
+ margin-top: 2px;
+ }
+ }
+
+ ul.pm-list {
+ list-style-type: none;
+ font-weight: 400;
+ margin-left: 0;
+ margin-bottom: 0;
+
+ span.fa-group {
+ font-size: 90%;
+ }
+
+ li.pm-list-item {
+ position: relative;
+ padding: 1px 10px 1px 4px;
+ margin-left: 2px;
+
+ a {
+ text-decoration: none;
+ color: inherit;
+ }
+
+ .pm_left_col {
+ min-width: $left_col_size;
+ }
+ }
+
+ li#show_more_private_messages {
+ cursor: pointer;
+ padding-right: 26px;
+ padding-left: 6px;
+
+ a {
+ font-size: 12px;
+ }
+
+ .unread_count {
+ margin-top: 2px;
+ }
+ }
+ }
}
-#private-container {
- max-height: 210px;
+.active_private_messages_section {
+ #private_messages_section,
+ #private_messages_list,
+ #hide_more_private_messages {
+ background-color: hsl(202, 56%, 91%);
+ }
+
+ #private_messages_section {
+ border-radius: 4px 4px 0 0;
+ }
- /* Match the opacity for global-filters icons. */
- span.fa-group {
- opacity: 0.7;
+ #private_messages_list {
+ border-radius: 0 0 4px 4px;
+ }
+
+ #more_private_messages_sidebar_title {
+ font-weight: 600;
}
}
@@ -224,7 +321,7 @@ li.show-more-topics {
#subscribe-to-more-streams {
text-decoration: none;
margin: 5px auto 5.5px 10px;
- margin-bottom: 25px;
+ margin-bottom: $bottom_scrolling_buffer;
i {
min-width: 19px;
@@ -277,7 +374,7 @@ li.active-sub-filter {
}
#global_filters {
- margin-bottom: 16px;
+ margin-bottom: $sections_vertical_gutter;
.filter-icon {
display: inline-block;
@@ -301,20 +398,12 @@ li.active-sub-filter {
margin-top: 1px !important;
}
- .expanded_private_message .unread_count {
- /* This margin accounts for the fact that the private messages
- container gets a few pixels taller when expanded */
- margin: 0;
- display: inline;
- }
-
i {
opacity: 0.7;
}
}
li.top_left_all_messages,
-.private_messages_header,
li.top_left_mentions,
li.top_left_starred_messages,
li.top_left_drafts,
@@ -333,12 +422,6 @@ li.top_left_recent_topics {
padding-right: 10px;
}
-.top_left_row,
-.bottom_left_row,
-.top_left_private_messages {
- border-radius: 4px;
-}
-
.conversation-partners {
line-height: 1.25;
}
@@ -348,12 +431,6 @@ li.top_left_recent_topics {
font-size: 15px;
}
-.top_left_private_messages i.fa-envelope {
- position: relative;
- top: -1px;
- font-size: 11px;
-}
-
.top_left_mentions i.fa-at,
.top_left_starred_messages i.fa-star {
font-size: 13px;
@@ -479,32 +556,11 @@ ul.topic-list {
font-weight: normal;
}
-ul.expanded_private_messages {
- list-style-type: none;
-
- span.fa-group {
- font-size: 90%;
- }
- font-weight: 400;
- margin-left: 0;
- padding-bottom: 2px;
-}
-
li.topic-list-item {
position: relative;
padding-right: 5px;
}
-li.expanded_private_message {
- position: relative;
- padding-top: 1px;
- padding-bottom: 1px;
-
- a {
- margin: 1px 0;
- }
-}
-
.show-all-streams {
a {
color: hsl(0, 0%, 20%);
@@ -525,7 +581,7 @@ li.expanded_private_message {
}
.pm-box {
- margin-right: 20px;
+ margin-right: 16px;
align-items: center;
.user_circle {
@@ -548,6 +604,10 @@ li.expanded_private_message {
#topics_header {
display: none;
}
+
+ .zoom-out-hide {
+ display: none;
+ }
}
#topics_header {
@@ -560,7 +620,7 @@ li.expanded_private_message {
text-transform: uppercase;
i {
- margin: 0 5px 0 10px;
+ margin: 0 6px 0 13px;
position: relative;
top: 1px;
}
@@ -569,9 +629,12 @@ li.expanded_private_message {
#streams_header {
margin-right: 12px;
- padding-left: $far_left_gutter_size;
cursor: pointer;
- margin-top: 3px;
+ padding: $sections_vertical_gutter 0 3px $far_left_gutter_size;
+ position: sticky;
+ top: 0;
+ background: hsl(0, 0%, 100%);
+ z-index: 1;
input {
padding-right: 20px;
@@ -643,7 +706,45 @@ li.expanded_private_message {
display: none;
}
+ &.private_messages_container ul.pm-list {
+ margin-bottom: $bottom_scrolling_buffer;
+ }
+
+ #more_private_messages_sidebar_title {
+ display: inline;
+ }
+
+ #hide_more_private_messages {
+ display: block;
+ text-decoration: none;
+ color: inherit;
+ font-size: 12px;
+
+ span {
+ display: block;
+ padding: 2px 0 2px 4px;
+ }
+
+ &:hover {
+ span {
+ background-color: hsla(120, 12.3%, 71.4%, 0.38);
+ border-radius: 4px;
+ }
+ }
+ }
+
.zoom-in-hide {
display: none;
}
+
+ .zoom-in-sticky {
+ position: sticky;
+ top: 0;
+ z-index: 1;
+ padding: 3px 0 3px $far_left_gutter_size;
+ }
+
+ #show_all_private_messages {
+ margin-right: 5px !important;
+ }
}
diff --git a/static/styles/zulip.css b/static/styles/zulip.css
index 44a7570591355..90d052b0a2c58 100644
--- a/static/styles/zulip.css
+++ b/static/styles/zulip.css
@@ -18,7 +18,7 @@ go beneath the header.
*/
$sidebar_top: calc($header_height + $header_padding_bottom);
-/* These need to agree with scroll_bar.js */
+$left_sidebar_collapse_widget_gutter: 10px;
$left_sidebar_width: 270px;
$right_sidebar_width: 250px;
@@ -532,7 +532,7 @@ p.n-margin {
.column-left .left-sidebar {
width: $left_sidebar_width;
- padding-left: 0;
+ padding-left: $left_sidebar_collapse_widget_gutter;
}
.column-right .right-sidebar {
@@ -548,7 +548,9 @@ p.n-margin {
.column-middle,
#compose-content {
margin-right: $right_sidebar_width;
- margin-left: $left_sidebar_width;
+ margin-left: calc(
+ $left_sidebar_width + $left_sidebar_collapse_widget_gutter
+ );
position: relative;
}
diff --git a/static/templates/left_sidebar.hbs b/static/templates/left_sidebar.hbs
index 20ba84226580f..c753c72d83090 100644
--- a/static/templates/left_sidebar.hbs
+++ b/static/templates/left_sidebar.hbs
@@ -22,20 +22,6 @@
<span>{{t 'Recent conversations' }}</span>
</a>
</li>
- <li class="top_left_private_messages hidden-for-spectators">
- <div class="private_messages_header top_left_row" title="{{t 'Private messages' }} (P)">
- <a href="#narrow/is/private">
- <span class="filter-icon">
- <i class="fa fa-envelope" aria-hidden="true"></i>
- </span>
- {{~!-- squash whitespace --~}}
- <span>{{t 'Private messages' }}</span>
- <span class="unread_count"></span>
- </a>
- </div>
- <div id="private-container" class="scrolling_list" data-simplebar>
- </div>
- </li>
<li class="top_left_mentions top_left_row hidden-for-spectators" title="{{t 'Mentions' }}">
<a href="#narrow/is/mentioned">
<span class="filter-icon">
@@ -69,6 +55,31 @@
<span class="arrow drafts-sidebar-menu-icon"><i class="zulip-icon zulip-icon-ellipsis-v-solid" aria-hidden="true"></i></span>
</li>
</ul>
+ </div>
+
+ <div id="private_messages_sticky_header" class="private_messages_container zoom-out hidden-for-spectators">
+ <div id="private_messages_section">
+ <div id="private_messages_section_header" class="zoom-out zoom-in-sticky">
+ <span id="pm_tooltip_container">
+ <i id="toggle_private_messages_section_icon" class="fa fa-sm fa-caret-down toggle_private_messages_section zoom-in-hide" aria-hidden="true"></i>
+ <h4 class="sidebar-title toggle_private_messages_section">{{t 'PRIVATE MESSAGES' }}</h4>
+ </span>
+ <span class="unread_count"></span>
+ <a id="show_all_private_messages" href="#narrow/is/private">
+ <i class="fa fa-align-right" aria-label="{{t 'All private messages' }}"></i>
+ </a>
+ </div>
+ </div>
+ <a class="zoom-out-hide" id="hide_more_private_messages">
+ <span> {{t 'back to streams' }}</span>
+ </a>
+ </div>
+ {{~!-- squash whitespace --~}}
+ <div id="left_sidebar_scroll_container" class="scrolling_list" data-simplebar>
+ <div class="private_messages_container zoom-out hidden-for-spectators">
+ <div id="private_messages_list"></div>
+ </div>
+
<div id="streams_list" class="zoom-out">
<div id="streams_header" class="zoom-in-hide"><h4 class="sidebar-title" data-tippy-content="{{t 'Filter streams' }} (q)">{{t 'STREAMS' }}</h4>
<span class="tippy-zulip-tooltip streams_inline_icon_wrapper hidden-for-spectators" data-tippy-content="{{t 'Add streams' }}">
@@ -85,7 +96,7 @@
<div id="topics_header">
<a class="show-all-streams" tabindex="0"> <i class="fa fa-chevron-left" aria-hidden="true"></i>{{t 'Back to streams' }}</a>
</div>
- <div id="stream-filters-container" class="scrolling_list" data-simplebar>
+ <div id="stream-filters-container">
<ul id="stream_filters" class="filters"></ul>
{{#unless is_guest }}
<div id="subscribe-to-more-streams"></div>
diff --git a/static/templates/more_pms.hbs b/static/templates/more_pms.hbs
new file mode 100644
index 0000000000000..1804e9a31f5e9
--- /dev/null
+++ b/static/templates/more_pms.hbs
@@ -0,0 +1,8 @@
+<li id="show_more_private_messages" class="pm-list-item bottom_left_row {{#unless more_conversations_unread_count}}zero-pm-unreads{{/unless}}">
+ <span>
+ <a class="pm-name" tabindex="0">{{t "more conversations" }}</a>
+ <span class="unread_count {{#unless more_conversations_unread_count}}zero_count{{/unless}}">
+ {{more_conversations_unread_count}}
+ </span>
+ </span>
+</li>
diff --git a/static/templates/pm_list_item.hbs b/static/templates/pm_list_item.hbs
index 6c1ce47295d39..5ec4821e6e3c2 100644
--- a/static/templates/pm_list_item.hbs
+++ b/static/templates/pm_list_item.hbs
@@ -1,4 +1,4 @@
-<li class='{{#if is_active}}active-sub-filter{{/if}} {{#if is_zero}}zero-pm-unreads{{/if}} top_left_row expanded_private_message' data-user-ids-string='{{user_ids_string}}'>
+<li class='{{#if is_active}}active-sub-filter{{/if}} {{#if is_zero}}zero-pm-unreads{{/if}} pm-list-item bottom_left_row' data-user-ids-string='{{user_ids_string}}'>
<span class='pm-box' id='pm_user_status' data-user-ids-string='{{user_ids_string}}' data-is-group='{{is_group}}'>
<div class="pm_left_col">
@@ -18,4 +18,3 @@
</span>
</span>
</li>
-
diff --git a/tools/lib/capitalization.py b/tools/lib/capitalization.py
index b81ceaa8eb66d..d57951da75973 100644
--- a/tools/lib/capitalization.py
+++ b/tools/lib/capitalization.py
@@ -78,6 +78,9 @@
r"more topics",
# Used alone in a parenthetical where capitalized looks worse.
r"^deprecated$",
+ # We want the similar text in the Private Messages section to have the same capitalization.
+ r"more conversations",
+ r"back to streams",
# Capital 'i' looks weird in reminders popover
r"in 1 hour",
r"in 20 minutes",
| Create collapsible "Private messages" section in left sidebar
At present, private messages are collapsed in the left sidebar unless the user is in a private message narrow. This has a few downsides:
1. Getting to a PM conversation generally requires multiple clicks.
2. It's not immediately clear who sent you a new private message, which is important for determining whether one needs to read it right away.
3. It can be hard for new users to figure out how to view and send private messages.
In order to address this, we should try making a private messages section in the left sidebar that is open by default. Specifically:
1. Make a Private messages section just above STREAMS in the left sidebar that is open by default.
2. In the new PMs section, use the same algorithm we use for stream topics to decide how many conversations to show.
3. Make the PMs section collapsible, similar to the collapsible sections in #20072. The open/collapsed state should be sticky as the user navigates around Zulip, closes and reopens the window, logs out and in, etc.
Note that this will likely require experimentation for us to get it right. To avoid misdirected effort, please post screenshots in the #design stream on chat.zulip.org for feedback. Also, if (3) can't be implemented quickly, we can test the experience in chat.zulip.org without waiting for it to be completed.
[Prior discussion on CZO](https://chat.zulip.org/#narrow/stream/101-design/topic/private.20messages.20UI/near/1159032).
See also #11108.
|
mars-project__mars-426 | [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nfrom .... import opcodes as OperandDef\nfrom ....lib.sparse.core import issparse, get_array_module, cp, cps, sps\nfrom ....utils import on_serialize_shape, on_deserialize_shape\nfrom ....serialize import ValueType, NDArrayField, TupleField\nfrom ...core import TENSOR_TYPE, Tensor\nfrom ..utils import get_chunk_slices\nfrom .core import TensorNoInput\nfrom .scalar import scalar\n\n\nclass ArrayDataSource(TensorNoInput):\n \"\"\"\n Represents data from numpy or cupy array\n \"\"\"\n\n _op_type_ = OperandDef.TENSOR_DATA_SOURCE\n\n _data = NDArrayField('data')\n\n def __init__(self, data=None, dtype=None, gpu=None, **kw):\n if dtype is not None:\n dtype = np.dtype(dtype)\n elif data is not None:\n dtype = np.dtype(data.dtype)\n super(ArrayDataSource, self).__init__(_data=data, _dtype=dtype, _gpu=gpu, **kw)\n\n @property\n def data(self):\n return self._data\n\n def to_chunk_op(self, *args):\n _, idx, chunk_size = args\n chunk_op = self.copy().reset_key()\n chunk_op._data = self.data[get_chunk_slices(chunk_size, idx)]\n\n return chunk_op\n\n\nclass CSRMatrixDataSource(TensorNoInput):\n \"\"\"\n Represents data from sparse array include scipy sparse or cupy sparse matrix.\n \"\"\"\n\n _op_type_ = OperandDef.SPARSE_MATRIX_DATA_SOURCE\n\n _indices = NDArrayField('indices')\n _indptr = NDArrayField('indptr')\n _data = NDArrayField('data')\n _shape = TupleField('shape', ValueType.int64,\n on_serialize=on_serialize_shape, on_deserialize=on_deserialize_shape)\n\n def __init__(self, indices=None, indptr=None, data=None, shape=None,\n dtype=None, gpu=None, **kw):\n super(CSRMatrixDataSource, self).__init__(_indices=indices, _indptr=indptr,\n _data=data, _shape=shape, _dtype=dtype,\n _gpu=gpu, _sparse=True, **kw)\n\n def to_chunk_op(self, *args):\n _, idx, chunk_size = args\n\n xps = cps if self._gpu else sps\n if len(self._shape) == 1:\n shape = (1, self._shape[0])\n else:\n shape = self._shape\n data = xps.csr_matrix(\n (self._data, self._indices, self._indptr), shape)\n chunk_data = data[get_chunk_slices(chunk_size, idx)]\n\n chunk_op = self.copy().reset_key()\n chunk_op._data = chunk_data.data\n chunk_op._indices = chunk_data.indices\n chunk_op._indptr = chunk_data.indptr\n chunk_shape = chunk_data.shape[1:] \\\n if len(self._shape) == 1 else chunk_data.shape\n chunk_op._shape = chunk_shape\n\n return chunk_op\n\n @property\n def indices(self):\n return self._indices\n\n @property\n def indptr(self):\n return self._indptr\n\n @property\n def data(self):\n return self._data\n\n @property\n def shape(self):\n return self._shape\n\n\ndef _from_spmatrix(spmatrix, dtype=None, chunk_size=None, gpu=None):\n if gpu is None and cp is not None and get_array_module(spmatrix) is cp:\n gpu = True\n if dtype and spmatrix.dtype != dtype:\n spmatrix = spmatrix.astype(dtype)\n spmatrix = spmatrix.tocsr()\n op = 
CSRMatrixDataSource(indices=spmatrix.indices, indptr=spmatrix.indptr,\n data=spmatrix.data, shape=spmatrix.shape,\n dtype=spmatrix.dtype, gpu=gpu)\n return op(spmatrix.shape, chunk_size=chunk_size)\n\n\ndef tensor(data, dtype=None, chunk_size=None, gpu=None, sparse=False):\n if isinstance(data, TENSOR_TYPE):\n if dtype is not None and data.dtype != dtype:\n return data.astype(dtype)\n return data\n elif isinstance(data, tuple) and all(isinstance(d, TENSOR_TYPE) for d in data):\n from ..merge import stack\n\n data = stack(data)\n if dtype is not None:\n data = data.astype(dtype)\n return data\n elif np.isscalar(data):\n return scalar(data, dtype=dtype)\n elif issparse(data):\n return _from_spmatrix(data, dtype=dtype, chunk_size=chunk_size, gpu=gpu)\n else:\n m = get_array_module(data)\n data = m.asarray(data, dtype=dtype)\n if gpu is None and cp is not None and m is cp:\n gpu = True\n\n if isinstance(data, np.ndarray):\n if data.ndim == 0:\n return scalar(data.item(), dtype=dtype)\n op = ArrayDataSource(data, dtype=dtype, gpu=gpu)\n t = op(data.shape, chunk_size=chunk_size)\n if sparse and not t.issparse():\n return t.tosparse()\n return t\n else:\n raise ValueError('Cannot create tensor by given data: {0}'.format(data))\n\n\ndef array(x, dtype=None, copy=True, ndmin=None, chunk_size=None):\n \"\"\"\n Create a tensor.\n\n Parameters\n ----------\n object : array_like\n An array, any object exposing the array interface, an object whose\n __array__ method returns an array, or any (nested) sequence.\n dtype : data-type, optional\n The desired data-type for the array. If not given, then the type will\n be determined as the minimum type required to hold the objects in the\n sequence. This argument can only be used to 'upcast' the array. For\n downcasting, use the .astype(t) method.\n copy : bool, optional\n If true (default), then the object is copied. Otherwise, a copy will\n only be made if __array__ returns a copy, if obj is a nested sequence,\n or if a copy is needed to satisfy any of the other requirements\n (`dtype`, `order`, etc.).\n ndmin : int, optional\n Specifies the minimum number of dimensions that the resulting\n array should have. Ones will be pre-pended to the shape as\n needed to meet this requirement.\n chunk_size: int, tuple, optional\n Specifies chunk size for each dimension.\n\n Returns\n -------\n out : Tensor\n An tensor object satisfying the specified requirements.\n\n See Also\n --------\n empty, empty_like, zeros, zeros_like, ones, ones_like, full, full_like\n\n Examples\n --------\n >>> import mars.tensor as mt\n\n >>> mt.array([1, 2, 3]).execute()\n array([1, 2, 3])\n\n Upcasting:\n\n >>> mt.array([1, 2, 3.0]).execute()\n array([ 1., 2., 3.])\n\n More than one dimension:\n\n >>> mt.array([[1, 2], [3, 4]]).execute()\n array([[1, 2],\n [3, 4]])\n\n Minimum dimensions 2:\n\n >>> mt.array([1, 2, 3], ndmin=2).execute()\n array([[1, 2, 3]])\n\n Type provided:\n\n >>> mt.array([1, 2, 3], dtype=complex).execute()\n array([ 1.+0.j, 2.+0.j, 3.+0.j])\n\n \"\"\"\n raw_x = x\n x = tensor(x, chunk_size=chunk_size)\n if copy and x is raw_x:\n x = Tensor(x.data)\n while ndmin is not None and x.ndim < ndmin:\n x = x[np.newaxis, :]\n if dtype is not None and x.dtype != dtype:\n x = x.astype(dtype)\n return x\n\n\ndef asarray(x, dtype=None):\n \"\"\"Convert the input to an array.\n\n Parameters\n ----------\n a : array_like\n Input data, in any form that can be converted to a tensor. 
This\n includes lists, lists of tuples, tuples, tuples of tuples, tuples\n of lists and tensors.\n dtype : data-type, optional\n By default, the data-type is inferred from the input data.\n\n Returns\n -------\n out : Tensor\n Tensor interpretation of `a`. No copy is performed if the input\n is already an ndarray with matching dtype and order. If `a` is a\n subclass of ndarray, a base class ndarray is returned.\n\n Examples\n --------\n Convert a list into an array:\n\n >>> import mars.tensor as mt\n\n >>> a = [1, 2]\n >>> mt.asarray(a).execute()\n array([1, 2])\n\n Existing arrays are not copied:\n\n >>> a = mt.array([1, 2])\n >>> mt.asarray(a) is a\n True\n\n If `dtype` is set, array is copied only if dtype does not match:\n\n >>> a = mt.array([1, 2], dtype=mt.float32)\n >>> mt.asarray(a, dtype=mt.float32) is a\n True\n >>> mt.asarray(a, dtype=mt.float64) is a\n False\n \"\"\"\n return array(x, dtype=dtype, copy=False)\n",
"path": "mars/tensor/expressions/datasource/array.py"
}
] | [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\n\nfrom .... import opcodes as OperandDef\nfrom ....lib.sparse.core import issparse, get_array_module, cp, cps, sps\nfrom ....utils import on_serialize_shape, on_deserialize_shape\nfrom ....serialize import ValueType, NDArrayField, TupleField\nfrom ...core import TENSOR_TYPE, Tensor\nfrom ..utils import get_chunk_slices\nfrom .core import TensorNoInput\nfrom .scalar import scalar\n\n\nclass ArrayDataSource(TensorNoInput):\n \"\"\"\n Represents data from numpy or cupy array\n \"\"\"\n\n _op_type_ = OperandDef.TENSOR_DATA_SOURCE\n\n _data = NDArrayField('data')\n\n def __init__(self, data=None, dtype=None, gpu=None, **kw):\n if dtype is not None:\n dtype = np.dtype(dtype)\n elif data is not None:\n dtype = np.dtype(data.dtype)\n super(ArrayDataSource, self).__init__(_data=data, _dtype=dtype, _gpu=gpu, **kw)\n\n @property\n def data(self):\n return self._data\n\n def to_chunk_op(self, *args):\n _, idx, chunk_size = args\n chunk_op = self.copy().reset_key()\n chunk_op._data = self.data[get_chunk_slices(chunk_size, idx)]\n\n return chunk_op\n\n\nclass CSRMatrixDataSource(TensorNoInput):\n \"\"\"\n Represents data from sparse array include scipy sparse or cupy sparse matrix.\n \"\"\"\n\n _op_type_ = OperandDef.SPARSE_MATRIX_DATA_SOURCE\n\n _indices = NDArrayField('indices')\n _indptr = NDArrayField('indptr')\n _data = NDArrayField('data')\n _shape = TupleField('shape', ValueType.int64,\n on_serialize=on_serialize_shape, on_deserialize=on_deserialize_shape)\n\n def __init__(self, indices=None, indptr=None, data=None, shape=None,\n dtype=None, gpu=None, **kw):\n super(CSRMatrixDataSource, self).__init__(_indices=indices, _indptr=indptr,\n _data=data, _shape=shape, _dtype=dtype,\n _gpu=gpu, _sparse=True, **kw)\n\n def to_chunk_op(self, *args):\n _, idx, chunk_size = args\n\n xps = cps if self._gpu else sps\n if len(self._shape) == 1:\n shape = (1, self._shape[0])\n else:\n shape = self._shape\n data = xps.csr_matrix(\n (self._data, self._indices, self._indptr), shape)\n chunk_data = data[get_chunk_slices(chunk_size, idx)]\n\n chunk_op = self.copy().reset_key()\n chunk_op._data = chunk_data.data\n chunk_op._indices = chunk_data.indices\n chunk_op._indptr = chunk_data.indptr\n chunk_shape = chunk_data.shape[1:] \\\n if len(self._shape) == 1 else chunk_data.shape\n chunk_op._shape = chunk_shape\n\n return chunk_op\n\n @property\n def indices(self):\n return self._indices\n\n @property\n def indptr(self):\n return self._indptr\n\n @property\n def data(self):\n return self._data\n\n @property\n def shape(self):\n return self._shape\n\n\ndef _from_spmatrix(spmatrix, dtype=None, chunk_size=None, gpu=None):\n if gpu is None and cp is not None and get_array_module(spmatrix) is cp:\n gpu = True\n if dtype and spmatrix.dtype != dtype:\n spmatrix = spmatrix.astype(dtype)\n spmatrix = spmatrix.tocsr()\n op = 
CSRMatrixDataSource(indices=spmatrix.indices, indptr=spmatrix.indptr,\n data=spmatrix.data, shape=spmatrix.shape,\n dtype=spmatrix.dtype, gpu=gpu)\n return op(spmatrix.shape, chunk_size=chunk_size)\n\n\ndef tensor(data, dtype=None, chunk_size=None, gpu=None, sparse=False):\n if isinstance(data, TENSOR_TYPE):\n if dtype is not None and data.dtype != dtype:\n return data.astype(dtype)\n return data\n elif isinstance(data, (tuple, list)) and all(isinstance(d, TENSOR_TYPE) for d in data):\n from ..merge import stack\n\n data = stack(data)\n if dtype is not None:\n data = data.astype(dtype)\n return data\n elif np.isscalar(data):\n return scalar(data, dtype=dtype)\n elif issparse(data):\n return _from_spmatrix(data, dtype=dtype, chunk_size=chunk_size, gpu=gpu)\n else:\n m = get_array_module(data)\n data = m.asarray(data, dtype=dtype)\n if gpu is None and cp is not None and m is cp:\n gpu = True\n\n if isinstance(data, np.ndarray):\n if data.ndim == 0:\n return scalar(data.item(), dtype=dtype)\n op = ArrayDataSource(data, dtype=dtype, gpu=gpu)\n t = op(data.shape, chunk_size=chunk_size)\n if sparse and not t.issparse():\n return t.tosparse()\n return t\n else:\n raise ValueError('Cannot create tensor by given data: {0}'.format(data))\n\n\ndef array(x, dtype=None, copy=True, ndmin=None, chunk_size=None):\n \"\"\"\n Create a tensor.\n\n Parameters\n ----------\n object : array_like\n An array, any object exposing the array interface, an object whose\n __array__ method returns an array, or any (nested) sequence.\n dtype : data-type, optional\n The desired data-type for the array. If not given, then the type will\n be determined as the minimum type required to hold the objects in the\n sequence. This argument can only be used to 'upcast' the array. For\n downcasting, use the .astype(t) method.\n copy : bool, optional\n If true (default), then the object is copied. Otherwise, a copy will\n only be made if __array__ returns a copy, if obj is a nested sequence,\n or if a copy is needed to satisfy any of the other requirements\n (`dtype`, `order`, etc.).\n ndmin : int, optional\n Specifies the minimum number of dimensions that the resulting\n array should have. Ones will be pre-pended to the shape as\n needed to meet this requirement.\n chunk_size: int, tuple, optional\n Specifies chunk size for each dimension.\n\n Returns\n -------\n out : Tensor\n An tensor object satisfying the specified requirements.\n\n See Also\n --------\n empty, empty_like, zeros, zeros_like, ones, ones_like, full, full_like\n\n Examples\n --------\n >>> import mars.tensor as mt\n\n >>> mt.array([1, 2, 3]).execute()\n array([1, 2, 3])\n\n Upcasting:\n\n >>> mt.array([1, 2, 3.0]).execute()\n array([ 1., 2., 3.])\n\n More than one dimension:\n\n >>> mt.array([[1, 2], [3, 4]]).execute()\n array([[1, 2],\n [3, 4]])\n\n Minimum dimensions 2:\n\n >>> mt.array([1, 2, 3], ndmin=2).execute()\n array([[1, 2, 3]])\n\n Type provided:\n\n >>> mt.array([1, 2, 3], dtype=complex).execute()\n array([ 1.+0.j, 2.+0.j, 3.+0.j])\n\n \"\"\"\n raw_x = x\n x = tensor(x, chunk_size=chunk_size)\n if copy and x is raw_x:\n x = Tensor(x.data)\n while ndmin is not None and x.ndim < ndmin:\n x = x[np.newaxis, :]\n if dtype is not None and x.dtype != dtype:\n x = x.astype(dtype)\n return x\n\n\ndef asarray(x, dtype=None):\n \"\"\"Convert the input to an array.\n\n Parameters\n ----------\n a : array_like\n Input data, in any form that can be converted to a tensor. 
This\n includes lists, lists of tuples, tuples, tuples of tuples, tuples\n of lists and tensors.\n dtype : data-type, optional\n By default, the data-type is inferred from the input data.\n\n Returns\n -------\n out : Tensor\n Tensor interpretation of `a`. No copy is performed if the input\n is already an ndarray with matching dtype and order. If `a` is a\n subclass of ndarray, a base class ndarray is returned.\n\n Examples\n --------\n Convert a list into an array:\n\n >>> import mars.tensor as mt\n\n >>> a = [1, 2]\n >>> mt.asarray(a).execute()\n array([1, 2])\n\n Existing arrays are not copied:\n\n >>> a = mt.array([1, 2])\n >>> mt.asarray(a) is a\n True\n\n If `dtype` is set, array is copied only if dtype does not match:\n\n >>> a = mt.array([1, 2], dtype=mt.float32)\n >>> mt.asarray(a, dtype=mt.float32) is a\n True\n >>> mt.asarray(a, dtype=mt.float64) is a\n False\n \"\"\"\n return array(x, dtype=dtype, copy=False)\n",
"path": "mars/tensor/expressions/datasource/array.py"
}
] | diff --git a/mars/tensor/expressions/datasource/array.py b/mars/tensor/expressions/datasource/array.py
index 7cda2d338b..10a8625d27 100644
--- a/mars/tensor/expressions/datasource/array.py
+++ b/mars/tensor/expressions/datasource/array.py
@@ -129,7 +129,7 @@ def tensor(data, dtype=None, chunk_size=None, gpu=None, sparse=False):
if dtype is not None and data.dtype != dtype:
return data.astype(dtype)
return data
- elif isinstance(data, tuple) and all(isinstance(d, TENSOR_TYPE) for d in data):
+ elif isinstance(data, (tuple, list)) and all(isinstance(d, TENSOR_TYPE) for d in data):
from ..merge import stack
data = stack(data)
diff --git a/mars/tensor/expressions/tests/test_datasource.py b/mars/tensor/expressions/tests/test_datasource.py
index b8ed82528e..be4190822a 100644
--- a/mars/tensor/expressions/tests/test_datasource.py
+++ b/mars/tensor/expressions/tests/test_datasource.py
@@ -23,10 +23,20 @@
tiledb = None
from mars.tests.core import TestBase
-from mars.tensor.expressions.datasource import fromtiledb, TensorTileDBDataSource
+from mars.tensor.expressions.datasource import array, fromtiledb, TensorTileDBDataSource
class Test(TestBase):
+ def testFromArray(self):
+ x = array([1, 2, 3])
+ self.assertEqual(x.shape, (3,))
+
+ y = array([x, x])
+ self.assertEqual(y.shape, (2, 3))
+
+ z = array((x, x, x))
+ self.assertEqual(z.shape, (3, 3))
+
@unittest.skipIf(tiledb is None, 'TileDB not installed')
def testFromTileDB(self):
ctx = tiledb.Ctx()
| How to execute tensors that have been packed into an np.array
Example:
```python
import mars.tensor as mt
import numpy as np

def test_x():
    X1 = mt.tensor([0, 1])
    X2 = mt.tensor([2, 3])
    X = np.array([X1, X2])
    print(X.execute())
```
How to calculate X?
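For context, `np.array([X1, X2])` does not build a Mars tensor, so there is no Mars graph to execute; the tensors need to be combined with a Mars operation instead. A minimal sketch of the intended usage (assuming the `mars.tensor` API used above; `mt.stack` mirrors `numpy.stack`, and with the accompanying change `mt.array`/`mt.tensor` also accept a list or tuple of tensors):

```python
import mars.tensor as mt

X1 = mt.tensor([0, 1])
X2 = mt.tensor([2, 3])

# Combine the Mars tensors with a Mars operation rather than np.array().
X = mt.stack([X1, X2])      # lazily builds a (2, 2) tensor
print(X.execute())

# After the change in this PR, passing a list/tuple of tensors to
# mt.array()/mt.tensor() stacks them the same way:
Y = mt.array([X1, X2])      # shape (2, 2)
print(Y.execute())
```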
|
ansible__ansible-modules-core-3778 | [
{
"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2012, Michael DeHaan <[email protected]>\n# (c) 2013, Dylan Martin <[email protected]>\n# (c) 2015, Toshio Kuratomi <[email protected]>\n# (c) 2016, Dag Wieers <[email protected]>\n# (c) 2016, Virgil Dupras <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = '''\n---\nmodule: unarchive\nversion_added: 1.4\nshort_description: Unpacks an archive after (optionally) copying it from the local machine.\nextends_documentation_fragment: files\ndescription:\n - The M(unarchive) module unpacks an archive. By default, it will copy the source file from the local system to the target before unpacking - set copy=no to unpack an archive which already exists on the target..\noptions:\n src:\n description:\n - If copy=yes (default), local path to archive file to copy to the target server; can be absolute or relative. If copy=no, path on the target server to existing archive file to unpack.\n - If copy=no and src contains ://, the remote machine will download the file from the url first. (version_added 2.0)\n required: true\n default: null\n dest:\n description:\n - Remote absolute path where the archive should be unpacked\n required: true\n default: null\n copy:\n description:\n - \"If true, the file is copied from local 'master' to the target machine, otherwise, the plugin will look for src archive at the target machine.\"\n required: false\n choices: [ \"yes\", \"no\" ]\n default: \"yes\"\n creates:\n description:\n - a filename, when it already exists, this step will B(not) be run.\n required: no\n default: null\n version_added: \"1.6\"\n list_files:\n description:\n - If set to True, return the list of files that are contained in the tarball.\n required: false\n choices: [ \"yes\", \"no\" ]\n default: \"no\"\n version_added: \"2.0\"\n exclude:\n description:\n - List the directory and file entries that you would like to exclude from the unarchive action.\n required: false\n default: []\n version_added: \"2.1\"\n keep_newer:\n description:\n - Do not replace existing files that are newer than files from the archive.\n required: false\n default: no\n version_added: \"2.1\"\n extra_opts:\n description:\n - Specify additional options by passing in an array.\n default:\n required: false\n version_added: \"2.1\"\n validate_certs:\n description:\n - This only applies if using a https url as the source of the file.\n - This should only set to C(no) used on personally controlled sites using self-signed cer\n - Prior to 2.2 the code worked as if this was set to C(yes).\n required: false\n default: \"yes\"\n choices: [\"yes\", \"no\"]\n version_added: \"2.2\"\nauthor: \"Dag Wieers (@dagwieers)\"\ntodo:\n - re-implement tar support using native tarfile module\n - re-implement zip support using native zipfile module\nnotes:\n - requires C(gtar)/C(unzip) command on target host\n - can handle I(gzip), I(bzip2) 
and I(xz) compressed as well as uncompressed tar files\n - detects type of archive automatically\n - uses gtar's C(--diff arg) to calculate if changed or not. If this C(arg) is not\n supported, it will always unpack the archive\n - existing files/directories in the destination which are not in the archive\n are not touched. This is the same behavior as a normal archive extraction\n - existing files/directories in the destination which are not in the archive\n are ignored for purposes of deciding if the archive should be unpacked or not\n'''\n\nEXAMPLES = '''\n# Example from Ansible Playbooks\n- unarchive: src=foo.tgz dest=/var/lib/foo\n\n# Unarchive a file that is already on the remote machine\n- unarchive: src=/tmp/foo.zip dest=/usr/local/bin copy=no\n\n# Unarchive a file that needs to be downloaded (added in 2.0)\n- unarchive: src=https://example.com/example.zip dest=/usr/local/bin copy=no\n'''\n\nimport re\nimport os\nimport stat\nimport pwd\nimport grp\nimport datetime\nimport time\nimport binascii\nfrom zipfile import ZipFile, BadZipfile\nimport tarfile\nimport subprocess\n\n# String from tar that shows the tar contents are different from the\n# filesystem\nOWNER_DIFF_RE = re.compile(r': Uid differs$')\nGROUP_DIFF_RE = re.compile(r': Gid differs$')\nMODE_DIFF_RE = re.compile(r': Mode differs$')\n#NEWER_DIFF_RE = re.compile(r' is newer or same age.$')\nMISSING_FILE_RE = re.compile(r': Warning: Cannot stat: No such file or directory$')\nZIP_FILE_MODE_RE = re.compile(r'([r-][w-][stx-]){3}')\n# When downloading an archive, how much of the archive to download before\n# saving to a tempfile (64k)\nBUFSIZE = 65536\n\n# Return a CRC32 checksum of a file\ndef crc32(path):\n return binascii.crc32(open(path).read()) & 0xffffffff\n\nclass UnarchiveError(Exception):\n pass\n\n# class to handle .zip files\nclass ZipArchive(object):\n\n def __init__(self, src, dest, file_args, module):\n self.src = src\n self.dest = dest\n self.file_args = file_args\n self.opts = module.params['extra_opts']\n self.module = module\n self.excludes = module.params['exclude']\n self.includes = []\n self.cmd_path = self.module.get_bin_path('unzip')\n self._files_in_archive = []\n self._infodict = dict()\n\n def _permstr_to_octal(self, modestr, umask):\n ''' Convert a Unix permission string (rw-r--r--) into a mode (0644) '''\n revstr = modestr[::-1]\n mode = 0\n for j in range(0, 3):\n for i in range(0, 3):\n if revstr[i+3*j] in ['r', 'w', 'x', 's', 't']:\n mode += 2**(i+3*j)\n # The unzip utility does not support setting the stST bits\n# if revstr[i+3*j] in ['s', 't', 'S', 'T' ]:\n# mode += 2**(9+j)\n return ( mode & ~umask )\n\n def _legacy_file_list(self, force_refresh=False):\n unzip_bin = self.module.get_bin_path('unzip')\n if not unzip_bin:\n raise UnarchiveError('Python Zipfile cannot read %s and unzip not found' % self.src)\n\n rc, out, err = self.module.run_command([unzip_bin, '-v', self.src])\n if rc:\n raise UnarchiveError('Neither python zipfile nor unzip can read %s' % self.src)\n\n for line in out.splitlines()[3:-2]:\n fields = line.split(None, 7)\n self._files_in_archive.append(fields[7])\n self._infodict[fields[7]] = long(fields[6])\n\n def _crc32(self, path):\n if self._infodict:\n return self._infodict[path]\n\n try:\n archive = ZipFile(self.src)\n except BadZipfile:\n e = get_exception()\n if e.args[0].lower().startswith('bad magic number'):\n # Python2.4 can't handle zipfiles with > 64K files. 
Try using\n # /usr/bin/unzip instead\n self._legacy_file_list()\n else:\n raise\n else:\n try:\n for item in archive.infolist():\n self._infodict[item.filename] = long(item.CRC)\n except:\n archive.close()\n raise UnarchiveError('Unable to list files in the archive')\n\n return self._infodict[path]\n\n @property\n def files_in_archive(self, force_refresh=False):\n if self._files_in_archive and not force_refresh:\n return self._files_in_archive\n\n self._files_in_archive = []\n try:\n archive = ZipFile(self.src)\n except BadZipfile:\n e = get_exception()\n if e.args[0].lower().startswith('bad magic number'):\n # Python2.4 can't handle zipfiles with > 64K files. Try using\n # /usr/bin/unzip instead\n self._legacy_file_list(force_refresh)\n else:\n raise\n else:\n try:\n for member in archive.namelist():\n if member not in self.excludes:\n self._files_in_archive.append(member)\n except:\n archive.close()\n raise UnarchiveError('Unable to list files in the archive')\n\n archive.close()\n return self._files_in_archive\n\n def is_unarchived(self):\n cmd = '%s -ZT -s \"%s\"' % (self.cmd_path, self.src)\n if self.excludes:\n cmd += ' -x \"' + '\" \"'.join(self.excludes) + '\"'\n rc, out, err = self.module.run_command(cmd)\n\n old_out = out\n diff = ''\n out = ''\n if rc == 0:\n unarchived = True\n else:\n unarchived = False\n\n # Get some information related to user/group ownership\n umask = os.umask(0)\n os.umask(umask)\n\n # Get current user and group information\n groups = os.getgroups()\n run_uid = os.getuid()\n run_gid = os.getgid()\n try:\n run_owner = pwd.getpwuid(run_uid).pw_name\n except:\n run_owner = run_uid\n try:\n run_group = grp.getgrgid(run_gid).gr_name\n except:\n run_group = run_gid\n\n # Get future user ownership\n fut_owner = fut_uid = None\n if self.file_args['owner']:\n try:\n tpw = pwd.getpwname(self.file_args['owner'])\n except:\n try:\n tpw = pwd.getpwuid(self.file_args['owner'])\n except:\n tpw = pwd.getpwuid(run_uid)\n fut_owner = tpw.pw_name\n fut_uid = tpw.pw_uid\n else:\n try:\n fut_owner = run_owner\n except:\n pass\n fut_uid = run_uid\n\n # Get future group ownership\n fut_group = fut_gid = None\n if self.file_args['group']:\n try:\n tgr = grp.getgrnam(self.file_args['group'])\n except:\n try:\n tgr = grp.getgrgid(self.file_args['group'])\n except:\n tgr = grp.getgrgid(run_gid)\n fut_group = tgr.gr_name\n fut_gid = tgr.gr_gid\n else:\n try:\n fut_group = run_group\n except:\n pass\n fut_gid = run_gid\n\n for line in old_out.splitlines():\n change = False\n\n pcs = line.split()\n if len(pcs) != 8: continue\n\n ztype = pcs[0][0]\n permstr = pcs[0][1:10]\n version = pcs[0][1]\n ostype = pcs[0][2]\n size = int(pcs[3])\n path = pcs[7]\n\n # Skip excluded files\n if path in self.excludes:\n out += 'Path %s is excluded on request\\n' % path\n continue\n\n # Itemized change requires L for symlink\n if path[-1] == '/':\n if ztype != 'd':\n err += 'Path %s incorrectly tagged as \"%s\", but is a directory.\\n' % (path, ztype)\n ftype = 'd'\n elif ztype == 'l':\n ftype = 'L'\n elif ztype == '-':\n ftype = 'f'\n elif ztype == '?':\n ftype = 'f'\n\n # Some files may be storing FAT permissions, not Unix permissions\n if len(permstr) == 6:\n if path[-1] == '/':\n permstr = 'rwxrwxrwx'\n elif permstr == 'rwx---':\n permstr = 'rwxrwxrwx'\n else:\n permstr = 'rw-rw-rw-'\n\n # Test string conformity\n if len(permstr) != 9 or not ZIP_FILE_MODE_RE.match(permstr):\n raise UnarchiveError('ZIP info perm format incorrect, %s' % permstr)\n\n # DEBUG\n# err += \"%s%s %10d %s\\n\" % (ztype, 
permstr, size, path)\n\n dest = os.path.join(self.dest, path)\n try:\n st = os.lstat(dest)\n except:\n change = True\n self.includes.append(path)\n err += 'Path %s is missing\\n' % path\n diff += '>%s++++++.?? %s\\n' % (ftype, path)\n continue\n\n # Compare file types\n if ftype == 'd' and not stat.S_ISDIR(st.st_mode):\n change = True\n self.includes.append(path)\n err += 'File %s already exists, but not as a directory\\n' % path\n diff += 'c%s++++++.?? %s\\n' % (ftype, path)\n continue\n\n if ftype == 'f' and not stat.S_ISREG(st.st_mode):\n change = True\n unarchived = False\n self.includes.append(path)\n err += 'Directory %s already exists, but not as a regular file\\n' % path\n diff += 'c%s++++++.?? %s\\n' % (ftype, path)\n continue\n\n if ftype == 'L' and not stat.S_ISLNK(st.st_mode):\n change = True\n self.includes.append(path)\n err += 'Directory %s already exists, but not as a symlink\\n' % path\n diff += 'c%s++++++.?? %s\\n' % (ftype, path)\n continue\n\n itemized = list('.%s.......??' % ftype)\n\n dt_object = datetime.datetime(*(time.strptime(pcs[6], '%Y%m%d.%H%M%S')[0:6]))\n timestamp = time.mktime(dt_object.timetuple())\n\n # Compare file timestamps\n if stat.S_ISREG(st.st_mode):\n if self.module.params['keep_newer']:\n if timestamp > st.st_mtime:\n change = True\n self.includes.append(path)\n err += 'File %s is older, replacing file\\n' % path\n itemized[4] = 't'\n elif stat.S_ISREG(st.st_mode) and timestamp < st.st_mtime:\n # Add to excluded files, ignore other changes\n out += 'File %s is newer, excluding file\\n' % path\n continue\n else:\n if timestamp != st.st_mtime:\n change = True\n self.includes.append(path)\n err += 'File %s differs in mtime (%f vs %f)\\n' % (path, timestamp, st.st_mtime)\n itemized[4] = 't'\n\n # Compare file sizes\n if stat.S_ISREG(st.st_mode) and size != st.st_size:\n change = True\n err += 'File %s differs in size (%d vs %d)\\n' % (path, size, st.st_size)\n itemized[3] = 's'\n\n # Compare file checksums\n if stat.S_ISREG(st.st_mode):\n crc = crc32(dest)\n if crc != self._crc32(path):\n change = True\n err += 'File %s differs in CRC32 checksum (0x%08x vs 0x%08x)\\n' % (path, self._crc32(path), crc)\n itemized[2] = 'c'\n\n # Compare file permissions\n\n # Do not handle permissions of symlinks\n if ftype != 'L':\n # Only special files require no umask-handling\n if ztype == '?':\n mode = self._permstr_to_octal(permstr, 0)\n else:\n mode = self._permstr_to_octal(permstr, umask)\n if self.file_args['mode'] and self.file_args['mode'] != stat.S_IMODE(st.st_mode):\n change = True\n err += 'Path %s differs in permissions (%o vs %o)\\n' % (path, self.file_args['mode'], stat.S_IMODE(st.st_mode))\n itemized[5] = 'p'\n elif mode != stat.S_IMODE(st.st_mode):\n change = True\n itemized[5] = 'p'\n err += 'Path %s differs in permissions (%o vs %o)\\n' % (path, mode, stat.S_IMODE(st.st_mode))\n\n # Compare file user ownership\n owner = uid = None\n try:\n owner = pwd.getpwuid(st.st_uid).pw_name\n except:\n uid = st.st_uid\n\n # If we are not root and requested owner is not our user, fail\n if run_uid != 0 and (fut_owner != run_owner or fut_uid != run_uid):\n raise UnarchiveError('Cannot change ownership of %s to %s, as user %s' % (path, fut_owner, run_owner))\n\n if owner and owner != fut_owner:\n change = True\n err += 'Path %s is owned by user %s, not by user %s as expected\\n' % (path, owner, fut_owner)\n itemized[6] = 'o'\n elif uid and uid != fut_uid:\n change = True\n err += 'Path %s is owned by uid %s, not by uid %s as expected\\n' % (path, uid, fut_uid)\n 
itemized[6] = 'o'\n\n # Compare file group ownership\n group = gid = None\n try:\n group = grp.getgrgid(st.st_gid).gr_name\n except:\n gid = st.st_gid\n\n if run_uid != 0 and fut_gid not in groups:\n raise UnarchiveError('Cannot change group ownership of %s to %s, as user %s' % (path, fut_group, run_owner))\n\n if group and group != fut_group:\n change = True\n err += 'Path %s is owned by group %s, not by group %s as expected\\n' % (path, group, fut_group)\n itemized[6] = 'g'\n elif gid and gid != fut_gid:\n change = True\n err += 'Path %s is owned by gid %s, not by gid %s as expected\\n' % (path, gid, fut_gid)\n itemized[6] = 'g'\n\n # Register changed files and finalize diff output\n if change:\n if path not in self.includes:\n self.includes.append(path)\n diff += '%s %s\\n' % (''.join(itemized), path)\n\n if self.includes:\n unarchived = False\n\n # DEBUG\n# out = old_out + out\n\n return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd, diff=diff)\n\n def unarchive(self):\n cmd = '%s -o \"%s\"' % (self.cmd_path, self.src)\n if self.opts:\n cmd += ' ' + ' '.join(self.opts)\n if self.includes:\n cmd += ' \"' + '\" \"'.join(self.includes) + '\"'\n # We don't need to handle excluded files, since we simply do not include them\n# if self.excludes:\n# cmd += ' -x ' + ' '.join(self.excludes)\n cmd += ' -d \"%s\"' % self.dest\n rc, out, err = self.module.run_command(cmd)\n return dict(cmd=cmd, rc=rc, out=out, err=err)\n\n def can_handle_archive(self):\n if not self.cmd_path:\n return False\n cmd = '%s -l \"%s\"' % (self.cmd_path, self.src)\n rc, out, err = self.module.run_command(cmd)\n if rc == 0:\n return True\n return False\n\n\n# class to handle gzipped tar files\nclass TgzArchive(object):\n\n def __init__(self, src, dest, file_args, module):\n self.src = src\n self.dest = dest\n self.file_args = file_args\n self.opts = module.params['extra_opts']\n self.module = module\n self.excludes = [ path.rstrip('/') for path in self.module.params['exclude']]\n # Prefer gtar (GNU tar) as it supports the compression options -zjJ\n self.cmd_path = self.module.get_bin_path('gtar', None)\n if not self.cmd_path:\n # Fallback to tar\n self.cmd_path = self.module.get_bin_path('tar')\n self.zipflag = 'z'\n self.compress_mode = 'gz'\n self._files_in_archive = []\n\n def _get_tar_fileobj(self):\n \"\"\"Returns a file object that can be read by ``tarfile.open()``.\"\"\"\n return open(self.src, 'rb')\n\n @property\n def files_in_archive(self, force_refresh=False):\n if self._files_in_archive and not force_refresh:\n return self._files_in_archive\n\n # The use of Python's tarfile module here allows us to easily avoid tricky file encoding\n # problems. 
Ref #11348\n try:\n tf = tarfile.open(fileobj=self._get_tar_fileobj(), mode='r:%s' % self.compress_mode)\n except Exception:\n raise UnarchiveError('Unable to list files in the archive')\n\n for filename in tf.getnames():\n if filename and filename not in self.excludes:\n self._files_in_archive.append(filename)\n return self._files_in_archive\n\n def is_unarchived(self):\n cmd = '%s -C \"%s\" -d%s' % (self.cmd_path, self.dest, self.zipflag)\n if self.opts:\n cmd += ' ' + ' '.join(self.opts)\n if self.file_args['owner']:\n cmd += ' --owner=\"%s\"' % self.file_args['owner']\n if self.file_args['group']:\n cmd += ' --group=\"%s\"' % self.file_args['group']\n if self.file_args['mode']:\n cmd += ' --mode=\"%s\"' % self.file_args['mode']\n if self.module.params['keep_newer']:\n cmd += ' --keep-newer-files'\n if self.excludes:\n cmd += ' --exclude=\"' + '\" --exclude=\"'.join(self.excludes) + '\"'\n cmd += ' -f \"%s\"' % self.src\n rc, out, err = self.module.run_command(cmd)\n\n # Check whether the differences are in something that we're\n # setting anyway\n\n # What is different\n unarchived = True\n old_out = out\n out = ''\n run_uid = os.getuid()\n # When unarchiving as a user, or when owner/group/mode is supplied --diff is insufficient\n # Only way to be sure is to check request with what is on disk (as we do for zip)\n # Leave this up to set_fs_attributes_if_different() instead of inducing a (false) change\n for line in old_out.splitlines() + err.splitlines():\n if run_uid == 0 and not self.file_args['owner'] and OWNER_DIFF_RE.search(line):\n out += line + '\\n'\n if run_uid == 0 and not self.file_args['group'] and GROUP_DIFF_RE.search(line):\n out += line + '\\n'\n if not self.file_args['mode'] and MODE_DIFF_RE.search(line):\n out += line + '\\n'\n if MISSING_FILE_RE.search(line):\n out += line + '\\n'\n if out:\n unarchived = False\n return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd)\n\n def unarchive(self):\n cmd = '%s -C \"%s\" -x%s' % (self.cmd_path, self.dest, self.zipflag)\n if self.opts:\n cmd += ' ' + ' '.join(self.opts)\n if self.file_args['owner']:\n cmd += ' --owner=\"%s\"' % self.file_args['owner']\n if self.file_args['group']:\n cmd += ' --group=\"%s\"' % self.file_args['group']\n if self.file_args['mode']:\n cmd += ' --mode=\"%s\"' % self.file_args['mode']\n if self.module.params['keep_newer']:\n cmd += ' --keep-newer-files'\n if self.excludes:\n cmd += ' --exclude=\"' + '\" --exclude=\"'.join(self.excludes) + '\"'\n cmd += ' -f \"%s\"' % (self.src)\n rc, out, err = self.module.run_command(cmd, cwd=self.dest)\n return dict(cmd=cmd, rc=rc, out=out, err=err)\n\n def can_handle_archive(self):\n if not self.cmd_path:\n return False\n\n try:\n if self.files_in_archive:\n return True\n except UnarchiveError:\n pass\n # Errors and no files in archive assume that we weren't able to\n # properly unarchive it\n return False\n\n\n# class to handle tar files that aren't compressed\nclass TarArchive(TgzArchive):\n def __init__(self, src, dest, file_args, module):\n super(TarArchive, self).__init__(src, dest, file_args, module)\n # argument to tar\n self.zipflag = ''\n # parameter for python tarfile library\n self.compress_mode = ''\n\n\n# class to handle bzip2 compressed tar files\nclass TarBzipArchive(TgzArchive):\n def __init__(self, src, dest, file_args, module):\n super(TarBzipArchive, self).__init__(src, dest, file_args, module)\n self.zipflag = 'j'\n self.compress_mode = 'bz2'\n\n\n# class to handle xz compressed tar files\nclass TarXzArchive(TgzArchive):\n def 
__init__(self, src, dest, file_args, module):\n super(TarXzArchive, self).__init__(src, dest, file_args, module)\n self.zipflag = 'J'\n self.compress_mode = ''\n\n def _get_tar_fileobj(self):\n # Python's tarfile module doesn't support xz compression so we have to manually uncompress\n # it first.\n xz_bin_path = self.module.get_bin_path('xz')\n xz_stdout = tempfile.TemporaryFile()\n # we don't use self.module.run_command() to avoid loading the whole archive in memory.\n cmd = subprocess.Popen([xz_bin_path, '-dc', self.src], stdout=xz_stdout)\n rc = cmd.wait()\n if rc != 0:\n raise UnarchiveError(\"Could not uncompress with xz\")\n xz_stdout.seek(0)\n return xz_stdout\n\n\n# try handlers in order and return the one that works or bail if none work\ndef pick_handler(src, dest, file_args, module):\n handlers = [TgzArchive, ZipArchive, TarArchive, TarBzipArchive, TarXzArchive]\n for handler in handlers:\n obj = handler(src, dest, file_args, module)\n if obj.can_handle_archive():\n return obj\n module.fail_json(msg='Failed to find handler for \"%s\". Make sure the required command to extract the file is installed.' % src)\n\n\ndef main():\n module = AnsibleModule(\n # not checking because of daisy chain to file module\n argument_spec = dict(\n src = dict(required=True, type='path'),\n original_basename = dict(required=False, type='str'), # used to handle 'dest is a directory' via template, a slight hack\n dest = dict(required=True, type='path'),\n copy = dict(default=True, type='bool'),\n creates = dict(required=False, type='path'),\n list_files = dict(required=False, default=False, type='bool'),\n keep_newer = dict(required=False, default=False, type='bool'),\n exclude = dict(required=False, default=[], type='list'),\n extra_opts = dict(required=False, default=[], type='list'),\n validate_certs = dict(required=False, default=True, type='bool'),\n ),\n add_file_common_args = True,\n# supports_check_mode = True,\n )\n\n src = os.path.expanduser(module.params['src'])\n dest = os.path.expanduser(module.params['dest'])\n copy = module.params['copy']\n file_args = module.load_file_common_arguments(module.params)\n # did tar file arrive?\n if not os.path.exists(src):\n if copy:\n module.fail_json(msg=\"Source '%s' failed to transfer\" % src)\n # If copy=false, and src= contains ://, try and download the file to a temp directory.\n elif '://' in src:\n tempdir = os.path.dirname(os.path.realpath(__file__))\n package = os.path.join(tempdir, str(src.rsplit('/', 1)[1]))\n try:\n rsp, info = fetch_url(module, src)\n # If download fails, raise a proper exception\n if rsp is None:\n raise Exception(info['msg'])\n f = open(package, 'w')\n # Read 1kb at a time to save on ram\n while True:\n data = rsp.read(BUFSIZE)\n\n if data == \"\":\n break # End of file, break while loop\n\n f.write(data)\n f.close()\n src = package\n except Exception:\n e = get_exception()\n module.fail_json(msg=\"Failure downloading %s, %s\" % (src, e))\n else:\n module.fail_json(msg=\"Source '%s' does not exist\" % src)\n if not os.access(src, os.R_OK):\n module.fail_json(msg=\"Source '%s' not readable\" % src)\n\n # skip working with 0 size archives\n try:\n if os.path.getsize(src) == 0:\n module.fail_json(msg=\"Invalid archive '%s', the file is 0 bytes\" % src)\n except Exception:\n e = get_exception()\n module.fail_json(msg=\"Source '%s' not readable\" % src)\n\n # is dest OK to receive tar file?\n if not os.path.isdir(dest):\n module.fail_json(msg=\"Destination '%s' is not a directory\" % dest)\n\n handler = pick_handler(src, dest, 
file_args, module)\n\n res_args = dict(handler=handler.__class__.__name__, dest=dest, src=src)\n\n # do we need to do unpack?\n check_results = handler.is_unarchived()\n\n # DEBUG\n# res_args['check_results'] = check_results\n\n if check_results['unarchived']:\n res_args['changed'] = False\n else:\n # do the unpack\n try:\n res_args['extract_results'] = handler.unarchive()\n if res_args['extract_results']['rc'] != 0:\n module.fail_json(msg=\"failed to unpack %s to %s\" % (src, dest), **res_args)\n except IOError:\n module.fail_json(msg=\"failed to unpack %s to %s\" % (src, dest), **res_args)\n else:\n res_args['changed'] = True\n\n if check_results.get('diff', False):\n res_args['diff'] = { 'prepared': check_results['diff'] }\n\n # Run only if we found differences (idempotence) or diff was missing\n if res_args.get('diff', True):\n # do we need to change perms?\n for filename in handler.files_in_archive:\n file_args['path'] = os.path.join(dest, filename)\n try:\n res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'])\n except (IOError, OSError):\n e = get_exception()\n module.fail_json(msg=\"Unexpected error when accessing exploded file: %s\" % str(e), **res_args)\n\n if module.params['list_files']:\n res_args['files'] = handler.files_in_archive\n\n module.exit_json(**res_args)\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.urls import *\nif __name__ == '__main__':\n main()\n",
"path": "files/unarchive.py"
}
] | [
{
"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# (c) 2012, Michael DeHaan <[email protected]>\n# (c) 2013, Dylan Martin <[email protected]>\n# (c) 2015, Toshio Kuratomi <[email protected]>\n# (c) 2016, Dag Wieers <[email protected]>\n# (c) 2016, Virgil Dupras <[email protected]>\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see <http://www.gnu.org/licenses/>.\n\nDOCUMENTATION = '''\n---\nmodule: unarchive\nversion_added: 1.4\nshort_description: Unpacks an archive after (optionally) copying it from the local machine.\nextends_documentation_fragment: files\ndescription:\n - The M(unarchive) module unpacks an archive. By default, it will copy the source file from the local system to the target before unpacking - set copy=no to unpack an archive which already exists on the target..\noptions:\n src:\n description:\n - If copy=yes (default), local path to archive file to copy to the target server; can be absolute or relative. If copy=no, path on the target server to existing archive file to unpack.\n - If copy=no and src contains ://, the remote machine will download the file from the url first. (version_added 2.0)\n required: true\n default: null\n dest:\n description:\n - Remote absolute path where the archive should be unpacked\n required: true\n default: null\n copy:\n description:\n - \"If true, the file is copied from local 'master' to the target machine, otherwise, the plugin will look for src archive at the target machine.\"\n required: false\n choices: [ \"yes\", \"no\" ]\n default: \"yes\"\n creates:\n description:\n - a filename, when it already exists, this step will B(not) be run.\n required: no\n default: null\n version_added: \"1.6\"\n list_files:\n description:\n - If set to True, return the list of files that are contained in the tarball.\n required: false\n choices: [ \"yes\", \"no\" ]\n default: \"no\"\n version_added: \"2.0\"\n exclude:\n description:\n - List the directory and file entries that you would like to exclude from the unarchive action.\n required: false\n default: []\n version_added: \"2.1\"\n keep_newer:\n description:\n - Do not replace existing files that are newer than files from the archive.\n required: false\n default: no\n version_added: \"2.1\"\n extra_opts:\n description:\n - Specify additional options by passing in an array.\n default:\n required: false\n version_added: \"2.1\"\n validate_certs:\n description:\n - This only applies if using a https url as the source of the file.\n - This should only set to C(no) used on personally controlled sites using self-signed cer\n - Prior to 2.2 the code worked as if this was set to C(yes).\n required: false\n default: \"yes\"\n choices: [\"yes\", \"no\"]\n version_added: \"2.2\"\nauthor: \"Dag Wieers (@dagwieers)\"\ntodo:\n - re-implement tar support using native tarfile module\n - re-implement zip support using native zipfile module\nnotes:\n - requires C(gtar)/C(unzip) command on target host\n - can handle I(gzip), I(bzip2) 
and I(xz) compressed as well as uncompressed tar files\n - detects type of archive automatically\n - uses gtar's C(--diff arg) to calculate if changed or not. If this C(arg) is not\n supported, it will always unpack the archive\n - existing files/directories in the destination which are not in the archive\n are not touched. This is the same behavior as a normal archive extraction\n - existing files/directories in the destination which are not in the archive\n are ignored for purposes of deciding if the archive should be unpacked or not\n'''\n\nEXAMPLES = '''\n# Example from Ansible Playbooks\n- unarchive: src=foo.tgz dest=/var/lib/foo\n\n# Unarchive a file that is already on the remote machine\n- unarchive: src=/tmp/foo.zip dest=/usr/local/bin copy=no\n\n# Unarchive a file that needs to be downloaded (added in 2.0)\n- unarchive: src=https://example.com/example.zip dest=/usr/local/bin copy=no\n'''\n\nimport re\nimport os\nimport stat\nimport pwd\nimport grp\nimport datetime\nimport time\nimport binascii\nfrom zipfile import ZipFile, BadZipfile\nimport tarfile\nimport subprocess\n\n# String from tar that shows the tar contents are different from the\n# filesystem\nOWNER_DIFF_RE = re.compile(r': Uid differs$')\nGROUP_DIFF_RE = re.compile(r': Gid differs$')\nMODE_DIFF_RE = re.compile(r': Mode differs$')\n#NEWER_DIFF_RE = re.compile(r' is newer or same age.$')\nMISSING_FILE_RE = re.compile(r': Warning: Cannot stat: No such file or directory$')\nZIP_FILE_MODE_RE = re.compile(r'([r-][w-][stx-]){3}')\n# When downloading an archive, how much of the archive to download before\n# saving to a tempfile (64k)\nBUFSIZE = 65536\n\n# Return a CRC32 checksum of a file\ndef crc32(path):\n return binascii.crc32(open(path).read()) & 0xffffffff\n\nclass UnarchiveError(Exception):\n pass\n\n# class to handle .zip files\nclass ZipArchive(object):\n\n def __init__(self, src, dest, file_args, module):\n self.src = src\n self.dest = dest\n self.file_args = file_args\n self.opts = module.params['extra_opts']\n self.module = module\n self.excludes = module.params['exclude']\n self.includes = []\n self.cmd_path = self.module.get_bin_path('unzip')\n self._files_in_archive = []\n self._infodict = dict()\n\n def _permstr_to_octal(self, modestr, umask):\n ''' Convert a Unix permission string (rw-r--r--) into a mode (0644) '''\n revstr = modestr[::-1]\n mode = 0\n for j in range(0, 3):\n for i in range(0, 3):\n if revstr[i+3*j] in ['r', 'w', 'x', 's', 't']:\n mode += 2**(i+3*j)\n # The unzip utility does not support setting the stST bits\n# if revstr[i+3*j] in ['s', 't', 'S', 'T' ]:\n# mode += 2**(9+j)\n return ( mode & ~umask )\n\n def _legacy_file_list(self, force_refresh=False):\n unzip_bin = self.module.get_bin_path('unzip')\n if not unzip_bin:\n raise UnarchiveError('Python Zipfile cannot read %s and unzip not found' % self.src)\n\n rc, out, err = self.module.run_command([unzip_bin, '-v', self.src])\n if rc:\n raise UnarchiveError('Neither python zipfile nor unzip can read %s' % self.src)\n\n for line in out.splitlines()[3:-2]:\n fields = line.split(None, 7)\n self._files_in_archive.append(fields[7])\n self._infodict[fields[7]] = long(fields[6])\n\n def _crc32(self, path):\n if self._infodict:\n return self._infodict[path]\n\n try:\n archive = ZipFile(self.src)\n except BadZipfile:\n e = get_exception()\n if e.args[0].lower().startswith('bad magic number'):\n # Python2.4 can't handle zipfiles with > 64K files. 
Try using\n # /usr/bin/unzip instead\n self._legacy_file_list()\n else:\n raise\n else:\n try:\n for item in archive.infolist():\n self._infodict[item.filename] = long(item.CRC)\n except:\n archive.close()\n raise UnarchiveError('Unable to list files in the archive')\n\n return self._infodict[path]\n\n @property\n def files_in_archive(self, force_refresh=False):\n if self._files_in_archive and not force_refresh:\n return self._files_in_archive\n\n self._files_in_archive = []\n try:\n archive = ZipFile(self.src)\n except BadZipfile:\n e = get_exception()\n if e.args[0].lower().startswith('bad magic number'):\n # Python2.4 can't handle zipfiles with > 64K files. Try using\n # /usr/bin/unzip instead\n self._legacy_file_list(force_refresh)\n else:\n raise\n else:\n try:\n for member in archive.namelist():\n if member not in self.excludes:\n self._files_in_archive.append(member)\n except:\n archive.close()\n raise UnarchiveError('Unable to list files in the archive')\n\n archive.close()\n return self._files_in_archive\n\n def is_unarchived(self):\n cmd = '%s -ZT -s \"%s\"' % (self.cmd_path, self.src)\n if self.excludes:\n cmd += ' -x \"' + '\" \"'.join(self.excludes) + '\"'\n rc, out, err = self.module.run_command(cmd)\n\n old_out = out\n diff = ''\n out = ''\n if rc == 0:\n unarchived = True\n else:\n unarchived = False\n\n # Get some information related to user/group ownership\n umask = os.umask(0)\n os.umask(umask)\n\n # Get current user and group information\n groups = os.getgroups()\n run_uid = os.getuid()\n run_gid = os.getgid()\n try:\n run_owner = pwd.getpwuid(run_uid).pw_name\n except:\n run_owner = run_uid\n try:\n run_group = grp.getgrgid(run_gid).gr_name\n except:\n run_group = run_gid\n\n # Get future user ownership\n fut_owner = fut_uid = None\n if self.file_args['owner']:\n try:\n tpw = pwd.getpwname(self.file_args['owner'])\n except:\n try:\n tpw = pwd.getpwuid(self.file_args['owner'])\n except:\n tpw = pwd.getpwuid(run_uid)\n fut_owner = tpw.pw_name\n fut_uid = tpw.pw_uid\n else:\n try:\n fut_owner = run_owner\n except:\n pass\n fut_uid = run_uid\n\n # Get future group ownership\n fut_group = fut_gid = None\n if self.file_args['group']:\n try:\n tgr = grp.getgrnam(self.file_args['group'])\n except:\n try:\n tgr = grp.getgrgid(self.file_args['group'])\n except:\n tgr = grp.getgrgid(run_gid)\n fut_group = tgr.gr_name\n fut_gid = tgr.gr_gid\n else:\n try:\n fut_group = run_group\n except:\n pass\n fut_gid = run_gid\n\n for line in old_out.splitlines():\n change = False\n\n pcs = line.split()\n if len(pcs) != 8: continue\n\n ztype = pcs[0][0]\n permstr = pcs[0][1:10]\n version = pcs[0][1]\n ostype = pcs[0][2]\n size = int(pcs[3])\n path = pcs[7]\n\n # Skip excluded files\n if path in self.excludes:\n out += 'Path %s is excluded on request\\n' % path\n continue\n\n # Itemized change requires L for symlink\n if path[-1] == '/':\n if ztype != 'd':\n err += 'Path %s incorrectly tagged as \"%s\", but is a directory.\\n' % (path, ztype)\n ftype = 'd'\n elif ztype == 'l':\n ftype = 'L'\n elif ztype == '-':\n ftype = 'f'\n elif ztype == '?':\n ftype = 'f'\n\n # Some files may be storing FAT permissions, not Unix permissions\n if len(permstr) == 6:\n if path[-1] == '/':\n permstr = 'rwxrwxrwx'\n elif permstr == 'rwx---':\n permstr = 'rwxrwxrwx'\n else:\n permstr = 'rw-rw-rw-'\n\n # Test string conformity\n if len(permstr) != 9 or not ZIP_FILE_MODE_RE.match(permstr):\n raise UnarchiveError('ZIP info perm format incorrect, %s' % permstr)\n\n # DEBUG\n# err += \"%s%s %10d %s\\n\" % (ztype, 
permstr, size, path)\n\n dest = os.path.join(self.dest, path)\n try:\n st = os.lstat(dest)\n except:\n change = True\n self.includes.append(path)\n err += 'Path %s is missing\\n' % path\n diff += '>%s++++++.?? %s\\n' % (ftype, path)\n continue\n\n # Compare file types\n if ftype == 'd' and not stat.S_ISDIR(st.st_mode):\n change = True\n self.includes.append(path)\n err += 'File %s already exists, but not as a directory\\n' % path\n diff += 'c%s++++++.?? %s\\n' % (ftype, path)\n continue\n\n if ftype == 'f' and not stat.S_ISREG(st.st_mode):\n change = True\n unarchived = False\n self.includes.append(path)\n err += 'Directory %s already exists, but not as a regular file\\n' % path\n diff += 'c%s++++++.?? %s\\n' % (ftype, path)\n continue\n\n if ftype == 'L' and not stat.S_ISLNK(st.st_mode):\n change = True\n self.includes.append(path)\n err += 'Directory %s already exists, but not as a symlink\\n' % path\n diff += 'c%s++++++.?? %s\\n' % (ftype, path)\n continue\n\n itemized = list('.%s.......??' % ftype)\n\n dt_object = datetime.datetime(*(time.strptime(pcs[6], '%Y%m%d.%H%M%S')[0:6]))\n timestamp = time.mktime(dt_object.timetuple())\n\n # Compare file timestamps\n if stat.S_ISREG(st.st_mode):\n if self.module.params['keep_newer']:\n if timestamp > st.st_mtime:\n change = True\n self.includes.append(path)\n err += 'File %s is older, replacing file\\n' % path\n itemized[4] = 't'\n elif stat.S_ISREG(st.st_mode) and timestamp < st.st_mtime:\n # Add to excluded files, ignore other changes\n out += 'File %s is newer, excluding file\\n' % path\n continue\n else:\n if timestamp != st.st_mtime:\n change = True\n self.includes.append(path)\n err += 'File %s differs in mtime (%f vs %f)\\n' % (path, timestamp, st.st_mtime)\n itemized[4] = 't'\n\n # Compare file sizes\n if stat.S_ISREG(st.st_mode) and size != st.st_size:\n change = True\n err += 'File %s differs in size (%d vs %d)\\n' % (path, size, st.st_size)\n itemized[3] = 's'\n\n # Compare file checksums\n if stat.S_ISREG(st.st_mode):\n crc = crc32(dest)\n if crc != self._crc32(path):\n change = True\n err += 'File %s differs in CRC32 checksum (0x%08x vs 0x%08x)\\n' % (path, self._crc32(path), crc)\n itemized[2] = 'c'\n\n # Compare file permissions\n\n # Do not handle permissions of symlinks\n if ftype != 'L':\n # Only special files require no umask-handling\n if ztype == '?':\n mode = self._permstr_to_octal(permstr, 0)\n else:\n mode = self._permstr_to_octal(permstr, umask)\n if self.file_args['mode'] and self.file_args['mode'] != stat.S_IMODE(st.st_mode):\n change = True\n err += 'Path %s differs in permissions (%o vs %o)\\n' % (path, self.file_args['mode'], stat.S_IMODE(st.st_mode))\n itemized[5] = 'p'\n elif mode != stat.S_IMODE(st.st_mode):\n change = True\n itemized[5] = 'p'\n err += 'Path %s differs in permissions (%o vs %o)\\n' % (path, mode, stat.S_IMODE(st.st_mode))\n\n # Compare file user ownership\n owner = uid = None\n try:\n owner = pwd.getpwuid(st.st_uid).pw_name\n except:\n uid = st.st_uid\n\n # If we are not root and requested owner is not our user, fail\n if run_uid != 0 and (fut_owner != run_owner or fut_uid != run_uid):\n raise UnarchiveError('Cannot change ownership of %s to %s, as user %s' % (path, fut_owner, run_owner))\n\n if owner and owner != fut_owner:\n change = True\n err += 'Path %s is owned by user %s, not by user %s as expected\\n' % (path, owner, fut_owner)\n itemized[6] = 'o'\n elif uid and uid != fut_uid:\n change = True\n err += 'Path %s is owned by uid %s, not by uid %s as expected\\n' % (path, uid, fut_uid)\n 
itemized[6] = 'o'\n\n # Compare file group ownership\n group = gid = None\n try:\n group = grp.getgrgid(st.st_gid).gr_name\n except:\n gid = st.st_gid\n\n if run_uid != 0 and fut_gid not in groups:\n raise UnarchiveError('Cannot change group ownership of %s to %s, as user %s' % (path, fut_group, run_owner))\n\n if group and group != fut_group:\n change = True\n err += 'Path %s is owned by group %s, not by group %s as expected\\n' % (path, group, fut_group)\n itemized[6] = 'g'\n elif gid and gid != fut_gid:\n change = True\n err += 'Path %s is owned by gid %s, not by gid %s as expected\\n' % (path, gid, fut_gid)\n itemized[6] = 'g'\n\n # Register changed files and finalize diff output\n if change:\n if path not in self.includes:\n self.includes.append(path)\n diff += '%s %s\\n' % (''.join(itemized), path)\n\n if self.includes:\n unarchived = False\n\n # DEBUG\n# out = old_out + out\n\n return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd, diff=diff)\n\n def unarchive(self):\n cmd = '%s -o \"%s\"' % (self.cmd_path, self.src)\n if self.opts:\n cmd += ' ' + ' '.join(self.opts)\n if self.includes:\n cmd += ' \"' + '\" \"'.join(self.includes) + '\"'\n # We don't need to handle excluded files, since we simply do not include them\n# if self.excludes:\n# cmd += ' -x ' + ' '.join(self.excludes)\n cmd += ' -d \"%s\"' % self.dest\n rc, out, err = self.module.run_command(cmd)\n return dict(cmd=cmd, rc=rc, out=out, err=err)\n\n def can_handle_archive(self):\n if not self.cmd_path:\n return False\n cmd = '%s -l \"%s\"' % (self.cmd_path, self.src)\n rc, out, err = self.module.run_command(cmd)\n if rc == 0:\n return True\n return False\n\n\n# class to handle gzipped tar files\nclass TgzArchive(object):\n\n def __init__(self, src, dest, file_args, module):\n self.src = src\n self.dest = dest\n self.file_args = file_args\n self.opts = module.params['extra_opts']\n self.module = module\n self.excludes = [ path.rstrip('/') for path in self.module.params['exclude']]\n # Prefer gtar (GNU tar) as it supports the compression options -zjJ\n self.cmd_path = self.module.get_bin_path('gtar', None)\n if not self.cmd_path:\n # Fallback to tar\n self.cmd_path = self.module.get_bin_path('tar')\n self.zipflag = 'z'\n self.compress_mode = 'gz'\n self._files_in_archive = []\n\n def _get_tar_fileobj(self):\n \"\"\"Returns a file object that can be read by ``tarfile.open()``.\"\"\"\n return open(self.src, 'rb')\n\n @property\n def files_in_archive(self, force_refresh=False):\n if self._files_in_archive and not force_refresh:\n return self._files_in_archive\n\n # The use of Python's tarfile module here allows us to easily avoid tricky file encoding\n # problems. 
Ref #11348\n try:\n tf = tarfile.open(fileobj=self._get_tar_fileobj(), mode='r:%s' % self.compress_mode)\n except Exception:\n raise UnarchiveError('Unable to list files in the archive')\n\n for filename in tf.getnames():\n if filename and filename not in self.excludes:\n self._files_in_archive.append(filename)\n return self._files_in_archive\n\n def is_unarchived(self):\n cmd = '%s -C \"%s\" -d%s' % (self.cmd_path, self.dest, self.zipflag)\n if self.opts:\n cmd += ' ' + ' '.join(self.opts)\n if self.file_args['owner']:\n cmd += ' --owner=\"%s\"' % self.file_args['owner']\n if self.file_args['group']:\n cmd += ' --group=\"%s\"' % self.file_args['group']\n if self.file_args['mode']:\n cmd += ' --mode=\"%s\"' % self.file_args['mode']\n if self.module.params['keep_newer']:\n cmd += ' --keep-newer-files'\n if self.excludes:\n cmd += ' --exclude=\"' + '\" --exclude=\"'.join(self.excludes) + '\"'\n cmd += ' -f \"%s\"' % self.src\n rc, out, err = self.module.run_command(cmd)\n\n # Check whether the differences are in something that we're\n # setting anyway\n\n # What is different\n unarchived = True\n old_out = out\n out = ''\n run_uid = os.getuid()\n # When unarchiving as a user, or when owner/group/mode is supplied --diff is insufficient\n # Only way to be sure is to check request with what is on disk (as we do for zip)\n # Leave this up to set_fs_attributes_if_different() instead of inducing a (false) change\n for line in old_out.splitlines() + err.splitlines():\n if run_uid == 0 and not self.file_args['owner'] and OWNER_DIFF_RE.search(line):\n out += line + '\\n'\n if run_uid == 0 and not self.file_args['group'] and GROUP_DIFF_RE.search(line):\n out += line + '\\n'\n if not self.file_args['mode'] and MODE_DIFF_RE.search(line):\n out += line + '\\n'\n if MISSING_FILE_RE.search(line):\n out += line + '\\n'\n if out:\n unarchived = False\n return dict(unarchived=unarchived, rc=rc, out=out, err=err, cmd=cmd)\n\n def unarchive(self):\n cmd = '%s -C \"%s\" -x%s' % (self.cmd_path, self.dest, self.zipflag)\n if self.opts:\n cmd += ' ' + ' '.join(self.opts)\n if self.file_args['owner']:\n cmd += ' --owner=\"%s\"' % self.file_args['owner']\n if self.file_args['group']:\n cmd += ' --group=\"%s\"' % self.file_args['group']\n if self.file_args['mode']:\n cmd += ' --mode=\"%s\"' % self.file_args['mode']\n if self.module.params['keep_newer']:\n cmd += ' --keep-newer-files'\n if self.excludes:\n cmd += ' --exclude=\"' + '\" --exclude=\"'.join(self.excludes) + '\"'\n cmd += ' -f \"%s\"' % (self.src)\n rc, out, err = self.module.run_command(cmd, cwd=self.dest)\n return dict(cmd=cmd, rc=rc, out=out, err=err)\n\n def can_handle_archive(self):\n if not self.cmd_path:\n return False\n\n try:\n if self.files_in_archive:\n return True\n except UnarchiveError:\n pass\n # Errors and no files in archive assume that we weren't able to\n # properly unarchive it\n return False\n\n\n# class to handle tar files that aren't compressed\nclass TarArchive(TgzArchive):\n def __init__(self, src, dest, file_args, module):\n super(TarArchive, self).__init__(src, dest, file_args, module)\n # argument to tar\n self.zipflag = ''\n # parameter for python tarfile library\n self.compress_mode = ''\n\n\n# class to handle bzip2 compressed tar files\nclass TarBzipArchive(TgzArchive):\n def __init__(self, src, dest, file_args, module):\n super(TarBzipArchive, self).__init__(src, dest, file_args, module)\n self.zipflag = 'j'\n self.compress_mode = 'bz2'\n\n\n# class to handle xz compressed tar files\nclass TarXzArchive(TgzArchive):\n def 
__init__(self, src, dest, file_args, module):\n super(TarXzArchive, self).__init__(src, dest, file_args, module)\n self.zipflag = 'J'\n self.compress_mode = ''\n\n def _get_tar_fileobj(self):\n # Python's tarfile module doesn't support xz compression so we have to manually uncompress\n # it first.\n xz_bin_path = self.module.get_bin_path('xz')\n xz_stdout = tempfile.TemporaryFile()\n # we don't use self.module.run_command() to avoid loading the whole archive in memory.\n cmd = subprocess.Popen([xz_bin_path, '-dc', self.src], stdout=xz_stdout)\n rc = cmd.wait()\n if rc != 0:\n raise UnarchiveError(\"Could not uncompress with xz\")\n xz_stdout.seek(0)\n return xz_stdout\n\n\n# try handlers in order and return the one that works or bail if none work\ndef pick_handler(src, dest, file_args, module):\n handlers = [TgzArchive, ZipArchive, TarArchive, TarBzipArchive, TarXzArchive]\n for handler in handlers:\n obj = handler(src, dest, file_args, module)\n if obj.can_handle_archive():\n return obj\n module.fail_json(msg='Failed to find handler for \"%s\". Make sure the required command to extract the file is installed.' % src)\n\n\ndef main():\n module = AnsibleModule(\n # not checking because of daisy chain to file module\n argument_spec = dict(\n src = dict(required=True, type='path'),\n original_basename = dict(required=False, type='str'), # used to handle 'dest is a directory' via template, a slight hack\n dest = dict(required=True, type='path'),\n copy = dict(default=True, type='bool'),\n creates = dict(required=False, type='path'),\n list_files = dict(required=False, default=False, type='bool'),\n keep_newer = dict(required=False, default=False, type='bool'),\n exclude = dict(required=False, default=[], type='list'),\n extra_opts = dict(required=False, default=[], type='list'),\n validate_certs = dict(required=False, default=True, type='bool'),\n ),\n add_file_common_args = True,\n# supports_check_mode = True,\n )\n\n # We screenscrape a huge amount of commands so use C locale anytime we do\n module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')\n\n src = os.path.expanduser(module.params['src'])\n dest = os.path.expanduser(module.params['dest'])\n copy = module.params['copy']\n file_args = module.load_file_common_arguments(module.params)\n # did tar file arrive?\n if not os.path.exists(src):\n if copy:\n module.fail_json(msg=\"Source '%s' failed to transfer\" % src)\n # If copy=false, and src= contains ://, try and download the file to a temp directory.\n elif '://' in src:\n tempdir = os.path.dirname(os.path.realpath(__file__))\n package = os.path.join(tempdir, str(src.rsplit('/', 1)[1]))\n try:\n rsp, info = fetch_url(module, src)\n # If download fails, raise a proper exception\n if rsp is None:\n raise Exception(info['msg'])\n f = open(package, 'w')\n # Read 1kb at a time to save on ram\n while True:\n data = rsp.read(BUFSIZE)\n\n if data == \"\":\n break # End of file, break while loop\n\n f.write(data)\n f.close()\n src = package\n except Exception:\n e = get_exception()\n module.fail_json(msg=\"Failure downloading %s, %s\" % (src, e))\n else:\n module.fail_json(msg=\"Source '%s' does not exist\" % src)\n if not os.access(src, os.R_OK):\n module.fail_json(msg=\"Source '%s' not readable\" % src)\n\n # skip working with 0 size archives\n try:\n if os.path.getsize(src) == 0:\n module.fail_json(msg=\"Invalid archive '%s', the file is 0 bytes\" % src)\n except Exception:\n e = get_exception()\n module.fail_json(msg=\"Source '%s' not readable\" % src)\n\n 
# is dest OK to receive tar file?\n if not os.path.isdir(dest):\n module.fail_json(msg=\"Destination '%s' is not a directory\" % dest)\n\n handler = pick_handler(src, dest, file_args, module)\n\n res_args = dict(handler=handler.__class__.__name__, dest=dest, src=src)\n\n # do we need to do unpack?\n check_results = handler.is_unarchived()\n\n # DEBUG\n# res_args['check_results'] = check_results\n\n if check_results['unarchived']:\n res_args['changed'] = False\n else:\n # do the unpack\n try:\n res_args['extract_results'] = handler.unarchive()\n if res_args['extract_results']['rc'] != 0:\n module.fail_json(msg=\"failed to unpack %s to %s\" % (src, dest), **res_args)\n except IOError:\n module.fail_json(msg=\"failed to unpack %s to %s\" % (src, dest), **res_args)\n else:\n res_args['changed'] = True\n\n if check_results.get('diff', False):\n res_args['diff'] = { 'prepared': check_results['diff'] }\n\n # Run only if we found differences (idempotence) or diff was missing\n if res_args.get('diff', True):\n # do we need to change perms?\n for filename in handler.files_in_archive:\n file_args['path'] = os.path.join(dest, filename)\n try:\n res_args['changed'] = module.set_fs_attributes_if_different(file_args, res_args['changed'])\n except (IOError, OSError):\n e = get_exception()\n module.fail_json(msg=\"Unexpected error when accessing exploded file: %s\" % str(e), **res_args)\n\n if module.params['list_files']:\n res_args['files'] = handler.files_in_archive\n\n module.exit_json(**res_args)\n\n# import module snippets\nfrom ansible.module_utils.basic import *\nfrom ansible.module_utils.urls import *\nif __name__ == '__main__':\n main()\n",
"path": "files/unarchive.py"
}
] | diff --git a/files/unarchive.py b/files/unarchive.py
index 2c495abc60b..6492af60633 100644
--- a/files/unarchive.py
+++ b/files/unarchive.py
@@ -701,6 +701,9 @@ def main():
# supports_check_mode = True,
)
+ # We screenscrape a huge amount of commands so use C locale anytime we do
+ module.run_command_environ_update = dict(LANG='C', LC_ALL='C', LC_MESSAGES='C', LC_CTYPE='C')
+
src = os.path.expanduser(module.params['src'])
dest = os.path.expanduser(module.params['dest'])
copy = module.params['copy']
| unarchive issue with ansible 2.1 rc3
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
unarchive
##### ANSIBLE VERSION
```
ansible-playbook 2.1.0.0
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/yannig/dev/ansible-conf/ansible/playbooks/library']
```
##### CONFIGURATION
None
##### OS / ENVIRONMENT
Nothing special
##### SUMMARY
When using unarchive with Ansible 2.1 rc3, I get an error. Using ansible devel or ansible 2.0 works.
##### STEPS TO REPRODUCE
Get unarchive.yml from the following location: https://github.com/Yannig/yannig-ansible-playbooks/blob/master/unarchive/unarchive.yml
Then run it:
```
ansible-playbook unarchive.yml
```
##### EXPECTED RESULTS
```
PLAY [Unarchive problem] *******************************************************
TASK [file] ********************************************************************
changed: [localhost]
TASK [file] ********************************************************************
changed: [localhost]
TASK [get_url] *****************************************************************
changed: [localhost]
TASK [unarchive] ***************************************************************
changed: [localhost]
PLAY RECAP *********************************************************************
localhost : ok=4 changed=4 unreachable=0 failed=0
```
##### ACTUAL RESULTS
```
PLAY [Unarchive problem] *******************************************************
TASK [file] ********************************************************************
changed: [localhost]
TASK [file] ********************************************************************
changed: [localhost]
TASK [get_url] *****************************************************************
ok: [localhost]
TASK [unarchive] ***************************************************************
fatal: [localhost]: FAILED! => {"changed": false, "failed": true, "msg": "Unexpected error when accessing exploded file: [Errno 2] Aucun fichier ou dossier de ce type: '/tmp/unarchive/apache-tomee-plus-1.7.4/webapps'"}
NO MORE HOSTS LEFT *************************************************************
to retry, use: --limit @unarchive.retry
PLAY RECAP *********************************************************************
localhost : ok=3 changed=2 unreachable=0 failed=1
```
Note: the devel branch is not affected by this issue.
|
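A minimal sketch of the locale point behind this row's fix: the unarchive handlers screen-scrape `tar --diff`/`unzip -ZT` output with English regexes (e.g. `MODE_DIFF_RE`, `MISSING_FILE_RE` in the module above), so under a French locale the translated messages never match and change detection goes wrong; the patch forces the C locale for every `run_command` call. The helper below is hypothetical (`tar_bin`, `archive`, `dest` are placeholder arguments), not the module itself.

```python
# Illustrative sketch only: why the fix pins the locale before screen-scraping.
# The regexes are copied from the module and only match English tar output, so
# running tar under e.g. a French locale would hide every diff line from them.
import os
import re
import subprocess

MODE_DIFF_RE = re.compile(r': Mode differs$')
MISSING_FILE_RE = re.compile(r': Warning: Cannot stat: No such file or directory$')

def tar_diff_lines(tar_bin, archive, dest):
    # Hypothetical helper: compare an archive against what is on disk, forcing
    # the C locale much like module.run_command_environ_update does in the patch.
    env = dict(os.environ, LANG='C', LC_ALL='C', LC_MESSAGES='C')
    proc = subprocess.Popen(
        [tar_bin, '-C', dest, '-d', '-f', archive],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env,
        universal_newlines=True,
    )
    out, err = proc.communicate()
    lines = out.splitlines() + err.splitlines()
    return [l for l in lines if MODE_DIFF_RE.search(l) or MISSING_FILE_RE.search(l)]
```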
matrix-org__synapse-6563 | [
{
"content": "# -*- coding: utf-8 -*-\n# Copyright 2014-2016 OpenMarket Ltd\n# Copyright 2018 New Vector Ltd\n# Copyright 2019 The Matrix.org Foundation C.I.C.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport calendar\nimport logging\nimport time\n\nfrom synapse.api.constants import PresenceState\nfrom synapse.storage.database import Database\nfrom synapse.storage.engines import PostgresEngine\nfrom synapse.storage.util.id_generators import (\n ChainedIdGenerator,\n IdGenerator,\n StreamIdGenerator,\n)\nfrom synapse.util.caches.stream_change_cache import StreamChangeCache\n\nfrom .account_data import AccountDataStore\nfrom .appservice import ApplicationServiceStore, ApplicationServiceTransactionStore\nfrom .cache import CacheInvalidationStore\nfrom .client_ips import ClientIpStore\nfrom .deviceinbox import DeviceInboxStore\nfrom .devices import DeviceStore\nfrom .directory import DirectoryStore\nfrom .e2e_room_keys import EndToEndRoomKeyStore\nfrom .end_to_end_keys import EndToEndKeyStore\nfrom .event_federation import EventFederationStore\nfrom .event_push_actions import EventPushActionsStore\nfrom .events import EventsStore\nfrom .events_bg_updates import EventsBackgroundUpdatesStore\nfrom .filtering import FilteringStore\nfrom .group_server import GroupServerStore\nfrom .keys import KeyStore\nfrom .media_repository import MediaRepositoryStore\nfrom .monthly_active_users import MonthlyActiveUsersStore\nfrom .openid import OpenIdStore\nfrom .presence import PresenceStore, UserPresenceState\nfrom .profile import ProfileStore\nfrom .push_rule import PushRuleStore\nfrom .pusher import PusherStore\nfrom .receipts import ReceiptsStore\nfrom .registration import RegistrationStore\nfrom .rejections import RejectionsStore\nfrom .relations import RelationsStore\nfrom .room import RoomStore\nfrom .roommember import RoomMemberStore\nfrom .search import SearchStore\nfrom .signatures import SignatureStore\nfrom .state import StateStore\nfrom .stats import StatsStore\nfrom .stream import StreamStore\nfrom .tags import TagsStore\nfrom .transactions import TransactionStore\nfrom .user_directory import UserDirectoryStore\nfrom .user_erasure_store import UserErasureStore\n\nlogger = logging.getLogger(__name__)\n\n\nclass DataStore(\n EventsBackgroundUpdatesStore,\n RoomMemberStore,\n RoomStore,\n RegistrationStore,\n StreamStore,\n ProfileStore,\n PresenceStore,\n TransactionStore,\n DirectoryStore,\n KeyStore,\n StateStore,\n SignatureStore,\n ApplicationServiceStore,\n EventsStore,\n EventFederationStore,\n MediaRepositoryStore,\n RejectionsStore,\n FilteringStore,\n PusherStore,\n PushRuleStore,\n ApplicationServiceTransactionStore,\n ReceiptsStore,\n EndToEndKeyStore,\n EndToEndRoomKeyStore,\n SearchStore,\n TagsStore,\n AccountDataStore,\n EventPushActionsStore,\n OpenIdStore,\n ClientIpStore,\n DeviceStore,\n DeviceInboxStore,\n UserDirectoryStore,\n GroupServerStore,\n UserErasureStore,\n MonthlyActiveUsersStore,\n StatsStore,\n RelationsStore,\n CacheInvalidationStore,\n):\n def 
__init__(self, database: Database, db_conn, hs):\n self.hs = hs\n self._clock = hs.get_clock()\n self.database_engine = database.engine\n\n all_users_native = are_all_users_on_domain(\n db_conn.cursor(), database.engine, hs.hostname\n )\n if not all_users_native:\n raise Exception(\n \"Found users in database not native to %s!\\n\"\n \"You cannot changed a synapse server_name after it's been configured\"\n % (hs.hostname,)\n )\n\n self._stream_id_gen = StreamIdGenerator(\n db_conn,\n \"events\",\n \"stream_ordering\",\n extra_tables=[(\"local_invites\", \"stream_id\")],\n )\n self._backfill_id_gen = StreamIdGenerator(\n db_conn,\n \"events\",\n \"stream_ordering\",\n step=-1,\n extra_tables=[(\"ex_outlier_stream\", \"event_stream_ordering\")],\n )\n self._presence_id_gen = StreamIdGenerator(\n db_conn, \"presence_stream\", \"stream_id\"\n )\n self._device_inbox_id_gen = StreamIdGenerator(\n db_conn, \"device_max_stream_id\", \"stream_id\"\n )\n self._public_room_id_gen = StreamIdGenerator(\n db_conn, \"public_room_list_stream\", \"stream_id\"\n )\n self._device_list_id_gen = StreamIdGenerator(\n db_conn,\n \"device_lists_stream\",\n \"stream_id\",\n extra_tables=[(\"user_signature_stream\", \"stream_id\")],\n )\n self._cross_signing_id_gen = StreamIdGenerator(\n db_conn, \"e2e_cross_signing_keys\", \"stream_id\"\n )\n\n self._access_tokens_id_gen = IdGenerator(db_conn, \"access_tokens\", \"id\")\n self._event_reports_id_gen = IdGenerator(db_conn, \"event_reports\", \"id\")\n self._push_rule_id_gen = IdGenerator(db_conn, \"push_rules\", \"id\")\n self._push_rules_enable_id_gen = IdGenerator(db_conn, \"push_rules_enable\", \"id\")\n self._push_rules_stream_id_gen = ChainedIdGenerator(\n self._stream_id_gen, db_conn, \"push_rules_stream\", \"stream_id\"\n )\n self._pushers_id_gen = StreamIdGenerator(\n db_conn, \"pushers\", \"id\", extra_tables=[(\"deleted_pushers\", \"stream_id\")]\n )\n self._group_updates_id_gen = StreamIdGenerator(\n db_conn, \"local_group_updates\", \"stream_id\"\n )\n\n if isinstance(self.database_engine, PostgresEngine):\n self._cache_id_gen = StreamIdGenerator(\n db_conn, \"cache_invalidation_stream\", \"stream_id\"\n )\n else:\n self._cache_id_gen = None\n\n super(DataStore, self).__init__(database, db_conn, hs)\n\n self._presence_on_startup = self._get_active_presence(db_conn)\n\n presence_cache_prefill, min_presence_val = self.db.get_cache_dict(\n db_conn,\n \"presence_stream\",\n entity_column=\"user_id\",\n stream_column=\"stream_id\",\n max_value=self._presence_id_gen.get_current_token(),\n )\n self.presence_stream_cache = StreamChangeCache(\n \"PresenceStreamChangeCache\",\n min_presence_val,\n prefilled_cache=presence_cache_prefill,\n )\n\n max_device_inbox_id = self._device_inbox_id_gen.get_current_token()\n device_inbox_prefill, min_device_inbox_id = self.db.get_cache_dict(\n db_conn,\n \"device_inbox\",\n entity_column=\"user_id\",\n stream_column=\"stream_id\",\n max_value=max_device_inbox_id,\n limit=1000,\n )\n self._device_inbox_stream_cache = StreamChangeCache(\n \"DeviceInboxStreamChangeCache\",\n min_device_inbox_id,\n prefilled_cache=device_inbox_prefill,\n )\n # The federation outbox and the local device inbox uses the same\n # stream_id generator.\n device_outbox_prefill, min_device_outbox_id = self.db.get_cache_dict(\n db_conn,\n \"device_federation_outbox\",\n entity_column=\"destination\",\n stream_column=\"stream_id\",\n max_value=max_device_inbox_id,\n limit=1000,\n )\n self._device_federation_outbox_stream_cache = StreamChangeCache(\n 
\"DeviceFederationOutboxStreamChangeCache\",\n min_device_outbox_id,\n prefilled_cache=device_outbox_prefill,\n )\n\n device_list_max = self._device_list_id_gen.get_current_token()\n self._device_list_stream_cache = StreamChangeCache(\n \"DeviceListStreamChangeCache\", device_list_max\n )\n self._user_signature_stream_cache = StreamChangeCache(\n \"UserSignatureStreamChangeCache\", device_list_max\n )\n self._device_list_federation_stream_cache = StreamChangeCache(\n \"DeviceListFederationStreamChangeCache\", device_list_max\n )\n\n events_max = self._stream_id_gen.get_current_token()\n curr_state_delta_prefill, min_curr_state_delta_id = self.db.get_cache_dict(\n db_conn,\n \"current_state_delta_stream\",\n entity_column=\"room_id\",\n stream_column=\"stream_id\",\n max_value=events_max, # As we share the stream id with events token\n limit=1000,\n )\n self._curr_state_delta_stream_cache = StreamChangeCache(\n \"_curr_state_delta_stream_cache\",\n min_curr_state_delta_id,\n prefilled_cache=curr_state_delta_prefill,\n )\n\n _group_updates_prefill, min_group_updates_id = self.db.get_cache_dict(\n db_conn,\n \"local_group_updates\",\n entity_column=\"user_id\",\n stream_column=\"stream_id\",\n max_value=self._group_updates_id_gen.get_current_token(),\n limit=1000,\n )\n self._group_updates_stream_cache = StreamChangeCache(\n \"_group_updates_stream_cache\",\n min_group_updates_id,\n prefilled_cache=_group_updates_prefill,\n )\n\n self._stream_order_on_start = self.get_room_max_stream_ordering()\n self._min_stream_order_on_start = self.get_room_min_stream_ordering()\n\n # Used in _generate_user_daily_visits to keep track of progress\n self._last_user_visit_update = self._get_start_of_day()\n\n def take_presence_startup_info(self):\n active_on_startup = self._presence_on_startup\n self._presence_on_startup = None\n return active_on_startup\n\n def _get_active_presence(self, db_conn):\n \"\"\"Fetch non-offline presence from the database so that we can register\n the appropriate time outs.\n \"\"\"\n\n sql = (\n \"SELECT user_id, state, last_active_ts, last_federation_update_ts,\"\n \" last_user_sync_ts, status_msg, currently_active FROM presence_stream\"\n \" WHERE state != ?\"\n )\n sql = self.database_engine.convert_param_style(sql)\n\n txn = db_conn.cursor()\n txn.execute(sql, (PresenceState.OFFLINE,))\n rows = self.db.cursor_to_dict(txn)\n txn.close()\n\n for row in rows:\n row[\"currently_active\"] = bool(row[\"currently_active\"])\n\n return [UserPresenceState(**row) for row in rows]\n\n def count_daily_users(self):\n \"\"\"\n Counts the number of users who used this homeserver in the last 24 hours.\n \"\"\"\n yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24)\n return self.db.runInteraction(\"count_daily_users\", self._count_users, yesterday)\n\n def count_monthly_users(self):\n \"\"\"\n Counts the number of users who used this homeserver in the last 30 days.\n Note this method is intended for phonehome metrics only and is different\n from the mau figure in synapse.storage.monthly_active_users which,\n amongst other things, includes a 3 day grace period before a user counts.\n \"\"\"\n thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30)\n return self.db.runInteraction(\n \"count_monthly_users\", self._count_users, thirty_days_ago\n )\n\n def _count_users(self, txn, time_from):\n \"\"\"\n Returns number of users seen in the past time_from period\n \"\"\"\n sql = \"\"\"\n SELECT COALESCE(count(*), 0) FROM (\n SELECT user_id FROM user_ips\n WHERE 
last_seen > ?\n GROUP BY user_id\n ) u\n \"\"\"\n txn.execute(sql, (time_from,))\n (count,) = txn.fetchone()\n return count\n\n def count_r30_users(self):\n \"\"\"\n Counts the number of 30 day retained users, defined as:-\n * Users who have created their accounts more than 30 days ago\n * Where last seen at most 30 days ago\n * Where account creation and last_seen are > 30 days apart\n\n Returns counts globaly for a given user as well as breaking\n by platform\n \"\"\"\n\n def _count_r30_users(txn):\n thirty_days_in_secs = 86400 * 30\n now = int(self._clock.time())\n thirty_days_ago_in_secs = now - thirty_days_in_secs\n\n sql = \"\"\"\n SELECT platform, COALESCE(count(*), 0) FROM (\n SELECT\n users.name, platform, users.creation_ts * 1000,\n MAX(uip.last_seen)\n FROM users\n INNER JOIN (\n SELECT\n user_id,\n last_seen,\n CASE\n WHEN user_agent LIKE '%%Android%%' THEN 'android'\n WHEN user_agent LIKE '%%iOS%%' THEN 'ios'\n WHEN user_agent LIKE '%%Electron%%' THEN 'electron'\n WHEN user_agent LIKE '%%Mozilla%%' THEN 'web'\n WHEN user_agent LIKE '%%Gecko%%' THEN 'web'\n ELSE 'unknown'\n END\n AS platform\n FROM user_ips\n ) uip\n ON users.name = uip.user_id\n AND users.appservice_id is NULL\n AND users.creation_ts < ?\n AND uip.last_seen/1000 > ?\n AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30\n GROUP BY users.name, platform, users.creation_ts\n ) u GROUP BY platform\n \"\"\"\n\n results = {}\n txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs))\n\n for row in txn:\n if row[0] == \"unknown\":\n pass\n results[row[0]] = row[1]\n\n sql = \"\"\"\n SELECT COALESCE(count(*), 0) FROM (\n SELECT users.name, users.creation_ts * 1000,\n MAX(uip.last_seen)\n FROM users\n INNER JOIN (\n SELECT\n user_id,\n last_seen\n FROM user_ips\n ) uip\n ON users.name = uip.user_id\n AND appservice_id is NULL\n AND users.creation_ts < ?\n AND uip.last_seen/1000 > ?\n AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30\n GROUP BY users.name, users.creation_ts\n ) u\n \"\"\"\n\n txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs))\n\n (count,) = txn.fetchone()\n results[\"all\"] = count\n\n return results\n\n return self.db.runInteraction(\"count_r30_users\", _count_r30_users)\n\n def _get_start_of_day(self):\n \"\"\"\n Returns millisecond unixtime for start of UTC day.\n \"\"\"\n now = time.gmtime()\n today_start = calendar.timegm((now.tm_year, now.tm_mon, now.tm_mday, 0, 0, 0))\n return today_start * 1000\n\n def generate_user_daily_visits(self):\n \"\"\"\n Generates daily visit data for use in cohort/ retention analysis\n \"\"\"\n\n def _generate_user_daily_visits(txn):\n logger.info(\"Calling _generate_user_daily_visits\")\n today_start = self._get_start_of_day()\n a_day_in_milliseconds = 24 * 60 * 60 * 1000\n now = self.clock.time_msec()\n\n sql = \"\"\"\n INSERT INTO user_daily_visits (user_id, device_id, timestamp)\n SELECT u.user_id, u.device_id, ?\n FROM user_ips AS u\n LEFT JOIN (\n SELECT user_id, device_id, timestamp FROM user_daily_visits\n WHERE timestamp = ?\n ) udv\n ON u.user_id = udv.user_id AND u.device_id=udv.device_id\n INNER JOIN users ON users.name=u.user_id\n WHERE last_seen > ? AND last_seen <= ?\n AND udv.timestamp IS NULL AND users.is_guest=0\n AND users.appservice_id IS NULL\n GROUP BY u.user_id, u.device_id\n \"\"\"\n\n # This means that the day has rolled over but there could still\n # be entries from the previous day. 
There is an edge case\n # where if the user logs in at 23:59 and overwrites their\n # last_seen at 00:01 then they will not be counted in the\n # previous day's stats - it is important that the query is run\n # often to minimise this case.\n if today_start > self._last_user_visit_update:\n yesterday_start = today_start - a_day_in_milliseconds\n txn.execute(\n sql,\n (\n yesterday_start,\n yesterday_start,\n self._last_user_visit_update,\n today_start,\n ),\n )\n self._last_user_visit_update = today_start\n\n txn.execute(\n sql, (today_start, today_start, self._last_user_visit_update, now)\n )\n # Update _last_user_visit_update to now. The reason to do this\n # rather just clamping to the beginning of the day is to limit\n # the size of the join - meaning that the query can be run more\n # frequently\n self._last_user_visit_update = now\n\n return self.db.runInteraction(\n \"generate_user_daily_visits\", _generate_user_daily_visits\n )\n\n def get_users(self):\n \"\"\"Function to retrieve a list of users in users table.\n\n Args:\n Returns:\n defer.Deferred: resolves to list[dict[str, Any]]\n \"\"\"\n return self.db.simple_select_list(\n table=\"users\",\n keyvalues={},\n retcols=[\n \"name\",\n \"password_hash\",\n \"is_guest\",\n \"admin\",\n \"user_type\",\n \"deactivated\",\n ],\n desc=\"get_users\",\n )\n\n def get_users_paginate(\n self, start, limit, name=None, guests=True, deactivated=False\n ):\n \"\"\"Function to retrieve a paginated list of users from\n users list. This will return a json list of users.\n\n Args:\n start (int): start number to begin the query from\n limit (int): number of rows to retrieve\n name (string): filter for user names\n guests (bool): whether to in include guest users\n deactivated (bool): whether to include deactivated users\n Returns:\n defer.Deferred: resolves to list[dict[str, Any]]\n \"\"\"\n name_filter = {}\n if name:\n name_filter[\"name\"] = \"%\" + name + \"%\"\n\n attr_filter = {}\n if not guests:\n attr_filter[\"is_guest\"] = False\n if not deactivated:\n attr_filter[\"deactivated\"] = False\n\n return self.db.simple_select_list_paginate(\n desc=\"get_users_paginate\",\n table=\"users\",\n orderby=\"name\",\n start=start,\n limit=limit,\n filters=name_filter,\n keyvalues=attr_filter,\n retcols=[\n \"name\",\n \"password_hash\",\n \"is_guest\",\n \"admin\",\n \"user_type\",\n \"deactivated\",\n ],\n )\n\n def search_users(self, term):\n \"\"\"Function to search users list for one or more users with\n the matched term.\n\n Args:\n term (str): search term\n col (str): column to query term should be matched to\n Returns:\n defer.Deferred: resolves to list[dict[str, Any]]\n \"\"\"\n return self.db.simple_search_list(\n table=\"users\",\n term=term,\n col=\"name\",\n retcols=[\"name\", \"password_hash\", \"is_guest\", \"admin\", \"user_type\"],\n desc=\"search_users\",\n )\n\n\ndef are_all_users_on_domain(txn, database_engine, domain):\n sql = database_engine.convert_param_style(\n \"SELECT COUNT(*) FROM users WHERE name NOT LIKE ?\"\n )\n pat = \"%:\" + domain\n txn.execute(sql, (pat,))\n num_not_matching = txn.fetchall()[0][0]\n if num_not_matching == 0:\n return True\n return False\n",
"path": "synapse/storage/data_stores/main/__init__.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n# Copyright 2014-2016 OpenMarket Ltd\n# Copyright 2018 New Vector Ltd\n# Copyright 2019 The Matrix.org Foundation C.I.C.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport calendar\nimport logging\nimport time\n\nfrom synapse.api.constants import PresenceState\nfrom synapse.storage.database import Database\nfrom synapse.storage.engines import PostgresEngine\nfrom synapse.storage.util.id_generators import (\n ChainedIdGenerator,\n IdGenerator,\n StreamIdGenerator,\n)\nfrom synapse.util.caches.stream_change_cache import StreamChangeCache\n\nfrom .account_data import AccountDataStore\nfrom .appservice import ApplicationServiceStore, ApplicationServiceTransactionStore\nfrom .cache import CacheInvalidationStore\nfrom .client_ips import ClientIpStore\nfrom .deviceinbox import DeviceInboxStore\nfrom .devices import DeviceStore\nfrom .directory import DirectoryStore\nfrom .e2e_room_keys import EndToEndRoomKeyStore\nfrom .end_to_end_keys import EndToEndKeyStore\nfrom .event_federation import EventFederationStore\nfrom .event_push_actions import EventPushActionsStore\nfrom .events import EventsStore\nfrom .events_bg_updates import EventsBackgroundUpdatesStore\nfrom .filtering import FilteringStore\nfrom .group_server import GroupServerStore\nfrom .keys import KeyStore\nfrom .media_repository import MediaRepositoryStore\nfrom .monthly_active_users import MonthlyActiveUsersStore\nfrom .openid import OpenIdStore\nfrom .presence import PresenceStore, UserPresenceState\nfrom .profile import ProfileStore\nfrom .push_rule import PushRuleStore\nfrom .pusher import PusherStore\nfrom .receipts import ReceiptsStore\nfrom .registration import RegistrationStore\nfrom .rejections import RejectionsStore\nfrom .relations import RelationsStore\nfrom .room import RoomStore\nfrom .roommember import RoomMemberStore\nfrom .search import SearchStore\nfrom .signatures import SignatureStore\nfrom .state import StateStore\nfrom .stats import StatsStore\nfrom .stream import StreamStore\nfrom .tags import TagsStore\nfrom .transactions import TransactionStore\nfrom .user_directory import UserDirectoryStore\nfrom .user_erasure_store import UserErasureStore\n\nlogger = logging.getLogger(__name__)\n\n\nclass DataStore(\n EventsBackgroundUpdatesStore,\n RoomMemberStore,\n RoomStore,\n RegistrationStore,\n StreamStore,\n ProfileStore,\n PresenceStore,\n TransactionStore,\n DirectoryStore,\n KeyStore,\n StateStore,\n SignatureStore,\n ApplicationServiceStore,\n EventsStore,\n EventFederationStore,\n MediaRepositoryStore,\n RejectionsStore,\n FilteringStore,\n PusherStore,\n PushRuleStore,\n ApplicationServiceTransactionStore,\n ReceiptsStore,\n EndToEndKeyStore,\n EndToEndRoomKeyStore,\n SearchStore,\n TagsStore,\n AccountDataStore,\n EventPushActionsStore,\n OpenIdStore,\n ClientIpStore,\n DeviceStore,\n DeviceInboxStore,\n UserDirectoryStore,\n GroupServerStore,\n UserErasureStore,\n MonthlyActiveUsersStore,\n StatsStore,\n RelationsStore,\n CacheInvalidationStore,\n):\n def 
__init__(self, database: Database, db_conn, hs):\n self.hs = hs\n self._clock = hs.get_clock()\n self.database_engine = database.engine\n\n all_users_native = are_all_users_on_domain(\n db_conn.cursor(), database.engine, hs.hostname\n )\n if not all_users_native:\n raise Exception(\n \"Found users in database not native to %s!\\n\"\n \"You cannot changed a synapse server_name after it's been configured\"\n % (hs.hostname,)\n )\n\n self._stream_id_gen = StreamIdGenerator(\n db_conn,\n \"events\",\n \"stream_ordering\",\n extra_tables=[(\"local_invites\", \"stream_id\")],\n )\n self._backfill_id_gen = StreamIdGenerator(\n db_conn,\n \"events\",\n \"stream_ordering\",\n step=-1,\n extra_tables=[(\"ex_outlier_stream\", \"event_stream_ordering\")],\n )\n self._presence_id_gen = StreamIdGenerator(\n db_conn, \"presence_stream\", \"stream_id\"\n )\n self._device_inbox_id_gen = StreamIdGenerator(\n db_conn, \"device_max_stream_id\", \"stream_id\"\n )\n self._public_room_id_gen = StreamIdGenerator(\n db_conn, \"public_room_list_stream\", \"stream_id\"\n )\n self._device_list_id_gen = StreamIdGenerator(\n db_conn,\n \"device_lists_stream\",\n \"stream_id\",\n extra_tables=[(\"user_signature_stream\", \"stream_id\")],\n )\n self._cross_signing_id_gen = StreamIdGenerator(\n db_conn, \"e2e_cross_signing_keys\", \"stream_id\"\n )\n\n self._access_tokens_id_gen = IdGenerator(db_conn, \"access_tokens\", \"id\")\n self._event_reports_id_gen = IdGenerator(db_conn, \"event_reports\", \"id\")\n self._push_rule_id_gen = IdGenerator(db_conn, \"push_rules\", \"id\")\n self._push_rules_enable_id_gen = IdGenerator(db_conn, \"push_rules_enable\", \"id\")\n self._push_rules_stream_id_gen = ChainedIdGenerator(\n self._stream_id_gen, db_conn, \"push_rules_stream\", \"stream_id\"\n )\n self._pushers_id_gen = StreamIdGenerator(\n db_conn, \"pushers\", \"id\", extra_tables=[(\"deleted_pushers\", \"stream_id\")]\n )\n self._group_updates_id_gen = StreamIdGenerator(\n db_conn, \"local_group_updates\", \"stream_id\"\n )\n\n if isinstance(self.database_engine, PostgresEngine):\n self._cache_id_gen = StreamIdGenerator(\n db_conn, \"cache_invalidation_stream\", \"stream_id\"\n )\n else:\n self._cache_id_gen = None\n\n super(DataStore, self).__init__(database, db_conn, hs)\n\n self._presence_on_startup = self._get_active_presence(db_conn)\n\n presence_cache_prefill, min_presence_val = self.db.get_cache_dict(\n db_conn,\n \"presence_stream\",\n entity_column=\"user_id\",\n stream_column=\"stream_id\",\n max_value=self._presence_id_gen.get_current_token(),\n )\n self.presence_stream_cache = StreamChangeCache(\n \"PresenceStreamChangeCache\",\n min_presence_val,\n prefilled_cache=presence_cache_prefill,\n )\n\n max_device_inbox_id = self._device_inbox_id_gen.get_current_token()\n device_inbox_prefill, min_device_inbox_id = self.db.get_cache_dict(\n db_conn,\n \"device_inbox\",\n entity_column=\"user_id\",\n stream_column=\"stream_id\",\n max_value=max_device_inbox_id,\n limit=1000,\n )\n self._device_inbox_stream_cache = StreamChangeCache(\n \"DeviceInboxStreamChangeCache\",\n min_device_inbox_id,\n prefilled_cache=device_inbox_prefill,\n )\n # The federation outbox and the local device inbox uses the same\n # stream_id generator.\n device_outbox_prefill, min_device_outbox_id = self.db.get_cache_dict(\n db_conn,\n \"device_federation_outbox\",\n entity_column=\"destination\",\n stream_column=\"stream_id\",\n max_value=max_device_inbox_id,\n limit=1000,\n )\n self._device_federation_outbox_stream_cache = StreamChangeCache(\n 
\"DeviceFederationOutboxStreamChangeCache\",\n min_device_outbox_id,\n prefilled_cache=device_outbox_prefill,\n )\n\n device_list_max = self._device_list_id_gen.get_current_token()\n self._device_list_stream_cache = StreamChangeCache(\n \"DeviceListStreamChangeCache\", device_list_max\n )\n self._user_signature_stream_cache = StreamChangeCache(\n \"UserSignatureStreamChangeCache\", device_list_max\n )\n self._device_list_federation_stream_cache = StreamChangeCache(\n \"DeviceListFederationStreamChangeCache\", device_list_max\n )\n\n events_max = self._stream_id_gen.get_current_token()\n curr_state_delta_prefill, min_curr_state_delta_id = self.db.get_cache_dict(\n db_conn,\n \"current_state_delta_stream\",\n entity_column=\"room_id\",\n stream_column=\"stream_id\",\n max_value=events_max, # As we share the stream id with events token\n limit=1000,\n )\n self._curr_state_delta_stream_cache = StreamChangeCache(\n \"_curr_state_delta_stream_cache\",\n min_curr_state_delta_id,\n prefilled_cache=curr_state_delta_prefill,\n )\n\n _group_updates_prefill, min_group_updates_id = self.db.get_cache_dict(\n db_conn,\n \"local_group_updates\",\n entity_column=\"user_id\",\n stream_column=\"stream_id\",\n max_value=self._group_updates_id_gen.get_current_token(),\n limit=1000,\n )\n self._group_updates_stream_cache = StreamChangeCache(\n \"_group_updates_stream_cache\",\n min_group_updates_id,\n prefilled_cache=_group_updates_prefill,\n )\n\n self._stream_order_on_start = self.get_room_max_stream_ordering()\n self._min_stream_order_on_start = self.get_room_min_stream_ordering()\n\n # Used in _generate_user_daily_visits to keep track of progress\n self._last_user_visit_update = self._get_start_of_day()\n\n def take_presence_startup_info(self):\n active_on_startup = self._presence_on_startup\n self._presence_on_startup = None\n return active_on_startup\n\n def _get_active_presence(self, db_conn):\n \"\"\"Fetch non-offline presence from the database so that we can register\n the appropriate time outs.\n \"\"\"\n\n sql = (\n \"SELECT user_id, state, last_active_ts, last_federation_update_ts,\"\n \" last_user_sync_ts, status_msg, currently_active FROM presence_stream\"\n \" WHERE state != ?\"\n )\n sql = self.database_engine.convert_param_style(sql)\n\n txn = db_conn.cursor()\n txn.execute(sql, (PresenceState.OFFLINE,))\n rows = self.db.cursor_to_dict(txn)\n txn.close()\n\n for row in rows:\n row[\"currently_active\"] = bool(row[\"currently_active\"])\n\n return [UserPresenceState(**row) for row in rows]\n\n def count_daily_users(self):\n \"\"\"\n Counts the number of users who used this homeserver in the last 24 hours.\n \"\"\"\n yesterday = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24)\n return self.db.runInteraction(\"count_daily_users\", self._count_users, yesterday)\n\n def count_monthly_users(self):\n \"\"\"\n Counts the number of users who used this homeserver in the last 30 days.\n Note this method is intended for phonehome metrics only and is different\n from the mau figure in synapse.storage.monthly_active_users which,\n amongst other things, includes a 3 day grace period before a user counts.\n \"\"\"\n thirty_days_ago = int(self._clock.time_msec()) - (1000 * 60 * 60 * 24 * 30)\n return self.db.runInteraction(\n \"count_monthly_users\", self._count_users, thirty_days_ago\n )\n\n def _count_users(self, txn, time_from):\n \"\"\"\n Returns number of users seen in the past time_from period\n \"\"\"\n sql = \"\"\"\n SELECT COALESCE(count(*), 0) FROM (\n SELECT user_id FROM user_ips\n WHERE 
last_seen > ?\n GROUP BY user_id\n ) u\n \"\"\"\n txn.execute(sql, (time_from,))\n (count,) = txn.fetchone()\n return count\n\n def count_r30_users(self):\n \"\"\"\n Counts the number of 30 day retained users, defined as:-\n * Users who have created their accounts more than 30 days ago\n * Where last seen at most 30 days ago\n * Where account creation and last_seen are > 30 days apart\n\n Returns counts globaly for a given user as well as breaking\n by platform\n \"\"\"\n\n def _count_r30_users(txn):\n thirty_days_in_secs = 86400 * 30\n now = int(self._clock.time())\n thirty_days_ago_in_secs = now - thirty_days_in_secs\n\n sql = \"\"\"\n SELECT platform, COALESCE(count(*), 0) FROM (\n SELECT\n users.name, platform, users.creation_ts * 1000,\n MAX(uip.last_seen)\n FROM users\n INNER JOIN (\n SELECT\n user_id,\n last_seen,\n CASE\n WHEN user_agent LIKE '%%Android%%' THEN 'android'\n WHEN user_agent LIKE '%%iOS%%' THEN 'ios'\n WHEN user_agent LIKE '%%Electron%%' THEN 'electron'\n WHEN user_agent LIKE '%%Mozilla%%' THEN 'web'\n WHEN user_agent LIKE '%%Gecko%%' THEN 'web'\n ELSE 'unknown'\n END\n AS platform\n FROM user_ips\n ) uip\n ON users.name = uip.user_id\n AND users.appservice_id is NULL\n AND users.creation_ts < ?\n AND uip.last_seen/1000 > ?\n AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30\n GROUP BY users.name, platform, users.creation_ts\n ) u GROUP BY platform\n \"\"\"\n\n results = {}\n txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs))\n\n for row in txn:\n if row[0] == \"unknown\":\n pass\n results[row[0]] = row[1]\n\n sql = \"\"\"\n SELECT COALESCE(count(*), 0) FROM (\n SELECT users.name, users.creation_ts * 1000,\n MAX(uip.last_seen)\n FROM users\n INNER JOIN (\n SELECT\n user_id,\n last_seen\n FROM user_ips\n ) uip\n ON users.name = uip.user_id\n AND appservice_id is NULL\n AND users.creation_ts < ?\n AND uip.last_seen/1000 > ?\n AND (uip.last_seen/1000) - users.creation_ts > 86400 * 30\n GROUP BY users.name, users.creation_ts\n ) u\n \"\"\"\n\n txn.execute(sql, (thirty_days_ago_in_secs, thirty_days_ago_in_secs))\n\n (count,) = txn.fetchone()\n results[\"all\"] = count\n\n return results\n\n return self.db.runInteraction(\"count_r30_users\", _count_r30_users)\n\n def _get_start_of_day(self):\n \"\"\"\n Returns millisecond unixtime for start of UTC day.\n \"\"\"\n now = time.gmtime()\n today_start = calendar.timegm((now.tm_year, now.tm_mon, now.tm_mday, 0, 0, 0))\n return today_start * 1000\n\n def generate_user_daily_visits(self):\n \"\"\"\n Generates daily visit data for use in cohort/ retention analysis\n \"\"\"\n\n def _generate_user_daily_visits(txn):\n logger.info(\"Calling _generate_user_daily_visits\")\n today_start = self._get_start_of_day()\n a_day_in_milliseconds = 24 * 60 * 60 * 1000\n now = self.clock.time_msec()\n\n sql = \"\"\"\n INSERT INTO user_daily_visits (user_id, device_id, timestamp)\n SELECT u.user_id, u.device_id, ?\n FROM user_ips AS u\n LEFT JOIN (\n SELECT user_id, device_id, timestamp FROM user_daily_visits\n WHERE timestamp = ?\n ) udv\n ON u.user_id = udv.user_id AND u.device_id=udv.device_id\n INNER JOIN users ON users.name=u.user_id\n WHERE last_seen > ? AND last_seen <= ?\n AND udv.timestamp IS NULL AND users.is_guest=0\n AND users.appservice_id IS NULL\n GROUP BY u.user_id, u.device_id\n \"\"\"\n\n # This means that the day has rolled over but there could still\n # be entries from the previous day. 
There is an edge case\n # where if the user logs in at 23:59 and overwrites their\n # last_seen at 00:01 then they will not be counted in the\n # previous day's stats - it is important that the query is run\n # often to minimise this case.\n if today_start > self._last_user_visit_update:\n yesterday_start = today_start - a_day_in_milliseconds\n txn.execute(\n sql,\n (\n yesterday_start,\n yesterday_start,\n self._last_user_visit_update,\n today_start,\n ),\n )\n self._last_user_visit_update = today_start\n\n txn.execute(\n sql, (today_start, today_start, self._last_user_visit_update, now)\n )\n # Update _last_user_visit_update to now. The reason to do this\n # rather just clamping to the beginning of the day is to limit\n # the size of the join - meaning that the query can be run more\n # frequently\n self._last_user_visit_update = now\n\n return self.db.runInteraction(\n \"generate_user_daily_visits\", _generate_user_daily_visits\n )\n\n def get_users(self):\n \"\"\"Function to retrieve a list of users in users table.\n\n Args:\n Returns:\n defer.Deferred: resolves to list[dict[str, Any]]\n \"\"\"\n return self.db.simple_select_list(\n table=\"users\",\n keyvalues={},\n retcols=[\n \"name\",\n \"password_hash\",\n \"is_guest\",\n \"admin\",\n \"user_type\",\n \"deactivated\",\n ],\n desc=\"get_users\",\n )\n\n def get_users_paginate(\n self, start, limit, name=None, guests=True, deactivated=False\n ):\n \"\"\"Function to retrieve a paginated list of users from\n users list. This will return a json list of users.\n\n Args:\n start (int): start number to begin the query from\n limit (int): number of rows to retrieve\n name (string): filter for user names\n guests (bool): whether to in include guest users\n deactivated (bool): whether to include deactivated users\n Returns:\n defer.Deferred: resolves to list[dict[str, Any]]\n \"\"\"\n name_filter = {}\n if name:\n name_filter[\"name\"] = \"%\" + name + \"%\"\n\n attr_filter = {}\n if not guests:\n attr_filter[\"is_guest\"] = 0\n if not deactivated:\n attr_filter[\"deactivated\"] = 0\n\n return self.db.simple_select_list_paginate(\n desc=\"get_users_paginate\",\n table=\"users\",\n orderby=\"name\",\n start=start,\n limit=limit,\n filters=name_filter,\n keyvalues=attr_filter,\n retcols=[\n \"name\",\n \"password_hash\",\n \"is_guest\",\n \"admin\",\n \"user_type\",\n \"deactivated\",\n ],\n )\n\n def search_users(self, term):\n \"\"\"Function to search users list for one or more users with\n the matched term.\n\n Args:\n term (str): search term\n col (str): column to query term should be matched to\n Returns:\n defer.Deferred: resolves to list[dict[str, Any]]\n \"\"\"\n return self.db.simple_search_list(\n table=\"users\",\n term=term,\n col=\"name\",\n retcols=[\"name\", \"password_hash\", \"is_guest\", \"admin\", \"user_type\"],\n desc=\"search_users\",\n )\n\n\ndef are_all_users_on_domain(txn, database_engine, domain):\n sql = database_engine.convert_param_style(\n \"SELECT COUNT(*) FROM users WHERE name NOT LIKE ?\"\n )\n pat = \"%:\" + domain\n txn.execute(sql, (pat,))\n num_not_matching = txn.fetchall()[0][0]\n if num_not_matching == 0:\n return True\n return False\n",
"path": "synapse/storage/data_stores/main/__init__.py"
}
] | diff --git a/changelog.d/6563.bugfix b/changelog.d/6563.bugfix
new file mode 100644
index 000000000000..3325fb1dcfda
--- /dev/null
+++ b/changelog.d/6563.bugfix
@@ -0,0 +1 @@
+Fix GET request on /_synapse/admin/v2/users endpoint. Contributed by Awesome Technologies Innovationslabor GmbH.
\ No newline at end of file
diff --git a/synapse/storage/data_stores/main/__init__.py b/synapse/storage/data_stores/main/__init__.py
index c577c0df5ffb..2700cca822f6 100644
--- a/synapse/storage/data_stores/main/__init__.py
+++ b/synapse/storage/data_stores/main/__init__.py
@@ -526,9 +526,9 @@ def get_users_paginate(
attr_filter = {}
if not guests:
- attr_filter["is_guest"] = False
+ attr_filter["is_guest"] = 0
if not deactivated:
- attr_filter["deactivated"] = False
+ attr_filter["deactivated"] = 0
return self.db.simple_select_list_paginate(
desc="get_users_paginate",
diff --git a/tests/rest/admin/test_admin.py b/tests/rest/admin/test_admin.py
index 0ed259438134..325bd6a6085a 100644
--- a/tests/rest/admin/test_admin.py
+++ b/tests/rest/admin/test_admin.py
@@ -341,6 +341,47 @@ def nonce():
self.assertEqual("Invalid user type", channel.json_body["error"])
+class UsersListTestCase(unittest.HomeserverTestCase):
+
+ servlets = [
+ synapse.rest.admin.register_servlets,
+ login.register_servlets,
+ ]
+ url = "/_synapse/admin/v2/users"
+
+ def prepare(self, reactor, clock, hs):
+ self.admin_user = self.register_user("admin", "pass", admin=True)
+ self.admin_user_tok = self.login("admin", "pass")
+
+ self.register_user("user1", "pass1", admin=False)
+ self.register_user("user2", "pass2", admin=False)
+
+ def test_no_auth(self):
+ """
+ Try to list users without authentication.
+ """
+ request, channel = self.make_request("GET", self.url, b"{}")
+ self.render(request)
+
+ self.assertEqual(401, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual("M_MISSING_TOKEN", channel.json_body["errcode"])
+
+ def test_all_users(self):
+ """
+ List all users, including deactivated users.
+ """
+ request, channel = self.make_request(
+ "GET",
+ self.url + "?deactivated=true",
+ b"{}",
+ access_token=self.admin_user_tok,
+ )
+ self.render(request)
+
+ self.assertEqual(200, int(channel.result["code"]), msg=channel.result["body"])
+ self.assertEqual(3, len(channel.json_body["users"]))
+
+
class ShutdownRoomTestCase(unittest.HomeserverTestCase):
servlets = [
synapse.rest.admin.register_servlets_for_client_rest_resource,
| /_synapse/admin/v2/users is broken
Running the /_synapse/admin/v2/users endpoint as documented produces an internal server error.
Postgres logs:
```
STATEMENT: SELECT name, password_hash, is_guest, admin, user_type, deactivated FROM users WHERE is_guest = false AND deactivated = false ORDER BY name ASC LIMIT 10 OFFSET 0
ERROR: operator does not exist: smallint = boolean at character 95
```
`\d users` shows that `is_guest` and `deactivated` are type `smallint` not `boolean`.
```
Table "public.users"
Column | Type | Collation | Nullable | Default
----------------------------+----------+-----------+----------+---------
name | text | | |
password_hash | text | | |
creation_ts | bigint | | |
admin | smallint | | not null | 0
upgrade_ts | bigint | | |
is_guest | smallint | | not null | 0
appservice_id | text | | |
consent_version | text | | |
consent_server_notice_sent | text | | |
user_type | text | | |
deactivated | smallint | | not null | 0
```
|
keras-team__keras-core-348 | [
{
"content": "from keras_core import backend\nfrom keras_core.api_export import keras_core_export\nfrom keras_core.layers.preprocessing.tf_data_layer import TFDataLayer\nfrom keras_core.utils import image_utils\n\n\n@keras_core_export(\"keras_core.layers.CenterCrop\")\nclass CenterCrop(TFDataLayer):\n \"\"\"A preprocessing layer which crops images.\n\n This layers crops the central portion of the images to a target size. If an\n image is smaller than the target size, it will be resized and cropped\n so as to return the largest possible window in the image that matches\n the target aspect ratio.\n\n Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`).\n\n Input shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., height, width, channels)`, in `\"channels_last\"` format,\n or `(..., channels, height, width)`, in `\"channels_first\"` format.\n\n Output shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., target_height, target_width, channels)`,\n or `(..., channels, target_height, target_width)`,\n in `\"channels_first\"` format.\n\n If the input height/width is even and the target height/width is odd (or\n inversely), the input image is left-padded by 1 pixel.\n\n **Note:** This layer is safe to use inside a `tf.data` pipeline\n (independently of which backend you're using).\n\n Args:\n height: Integer, the height of the output shape.\n width: Integer, the width of the output shape.\n data_format: string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape `(batch, height, width, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, channels, height, width)`. It defaults to the\n `image_data_format` value found in your Keras config file at\n `~/.keras/keras.json`. If you never set it, then it will be\n `\"channels_last\"`.\n \"\"\"\n\n def __init__(self, height, width, data_format=None, **kwargs):\n super().__init__(**kwargs)\n self.height = height\n self.width = width\n self.data_format = backend.standardize_data_format(data_format)\n\n def call(self, inputs):\n if self.data_format == \"channels_first\":\n init_height = inputs.shape[-2]\n init_width = inputs.shape[-1]\n else:\n init_height = inputs.shape[-3]\n init_width = inputs.shape[-2]\n\n if init_height is None or init_width is None:\n # Dynamic size case. TODO.\n raise ValueError(\n \"At this time, CenterCrop can only \"\n \"process images with a static spatial \"\n f\"shape. 
Received: inputs.shape={inputs.shape}\"\n )\n\n h_diff = init_height - self.height\n w_diff = init_width - self.width\n\n h_start = int(h_diff / 2)\n w_start = int(w_diff / 2)\n\n if h_diff >= 0 and w_diff >= 0:\n if len(inputs.shape) == 4:\n if self.data_format == \"channels_first\":\n return inputs[\n :,\n :,\n h_start : h_start + self.height,\n w_start : w_start + self.width,\n ]\n return inputs[\n :,\n h_start : h_start + self.height,\n w_start : w_start + self.width,\n :,\n ]\n elif len(inputs.shape) == 3:\n if self.data_format == \"channels_first\":\n return inputs[\n :,\n h_start : h_start + self.height,\n w_start : w_start + self.width,\n ]\n return inputs[\n h_start : h_start + self.height,\n w_start : w_start + self.width,\n :,\n ]\n\n return image_utils.smart_resize(\n inputs,\n [self.height, self.width],\n data_format=self.data_format,\n backend_module=self.backend,\n )\n\n def compute_output_shape(self, input_shape):\n input_shape = list(input_shape)\n if len(input_shape) == 4:\n if self.data_format == \"channels_last\":\n input_shape[1] = self.height\n input_shape[2] = self.width\n else:\n input_shape[2] = self.height\n input_shape[3] = self.width\n else:\n if self.data_format == \"channels_last\":\n input_shape[0] = self.height\n input_shape[1] = self.width\n else:\n input_shape[1] = self.height\n input_shape[2] = self.width\n return tuple(input_shape)\n\n def get_config(self):\n base_config = super().get_config()\n config = {\n \"height\": self.height,\n \"width\": self.width,\n \"data_format\": self.data_format,\n }\n return {**base_config, **config}\n",
"path": "keras_core/layers/preprocessing/center_crop.py"
}
] | [
{
"content": "from keras_core import backend\nfrom keras_core.api_export import keras_core_export\nfrom keras_core.layers.preprocessing.tf_data_layer import TFDataLayer\nfrom keras_core.utils import image_utils\n\n\n@keras_core_export(\"keras_core.layers.CenterCrop\")\nclass CenterCrop(TFDataLayer):\n \"\"\"A preprocessing layer which crops images.\n\n This layers crops the central portion of the images to a target size. If an\n image is smaller than the target size, it will be resized and cropped\n so as to return the largest possible window in the image that matches\n the target aspect ratio.\n\n Input pixel values can be of any range (e.g. `[0., 1.)` or `[0, 255]`).\n\n Input shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., height, width, channels)`, in `\"channels_last\"` format,\n or `(..., channels, height, width)`, in `\"channels_first\"` format.\n\n Output shape:\n 3D (unbatched) or 4D (batched) tensor with shape:\n `(..., target_height, target_width, channels)`,\n or `(..., channels, target_height, target_width)`,\n in `\"channels_first\"` format.\n\n If the input height/width is even and the target height/width is odd (or\n inversely), the input image is left-padded by 1 pixel.\n\n **Note:** This layer is safe to use inside a `tf.data` pipeline\n (independently of which backend you're using).\n\n Args:\n height: Integer, the height of the output shape.\n width: Integer, the width of the output shape.\n data_format: string, either `\"channels_last\"` or `\"channels_first\"`.\n The ordering of the dimensions in the inputs. `\"channels_last\"`\n corresponds to inputs with shape `(batch, height, width, channels)`\n while `\"channels_first\"` corresponds to inputs with shape\n `(batch, channels, height, width)`. It defaults to the\n `image_data_format` value found in your Keras config file at\n `~/.keras/keras.json`. If you never set it, then it will be\n `\"channels_last\"`.\n \"\"\"\n\n def __init__(self, height, width, data_format=None, **kwargs):\n super().__init__(**kwargs)\n self.height = height\n self.width = width\n self.data_format = backend.standardize_data_format(data_format)\n\n def call(self, inputs):\n inputs = self.backend.cast(inputs, self.compute_dtype)\n if self.data_format == \"channels_first\":\n init_height = inputs.shape[-2]\n init_width = inputs.shape[-1]\n else:\n init_height = inputs.shape[-3]\n init_width = inputs.shape[-2]\n\n if init_height is None or init_width is None:\n # Dynamic size case. TODO.\n raise ValueError(\n \"At this time, CenterCrop can only \"\n \"process images with a static spatial \"\n f\"shape. 
Received: inputs.shape={inputs.shape}\"\n )\n\n h_diff = init_height - self.height\n w_diff = init_width - self.width\n\n h_start = int(h_diff / 2)\n w_start = int(w_diff / 2)\n\n if h_diff >= 0 and w_diff >= 0:\n if len(inputs.shape) == 4:\n if self.data_format == \"channels_first\":\n return inputs[\n :,\n :,\n h_start : h_start + self.height,\n w_start : w_start + self.width,\n ]\n return inputs[\n :,\n h_start : h_start + self.height,\n w_start : w_start + self.width,\n :,\n ]\n elif len(inputs.shape) == 3:\n if self.data_format == \"channels_first\":\n return inputs[\n :,\n h_start : h_start + self.height,\n w_start : w_start + self.width,\n ]\n return inputs[\n h_start : h_start + self.height,\n w_start : w_start + self.width,\n :,\n ]\n\n return image_utils.smart_resize(\n inputs,\n [self.height, self.width],\n data_format=self.data_format,\n backend_module=self.backend,\n )\n\n def compute_output_shape(self, input_shape):\n input_shape = list(input_shape)\n if len(input_shape) == 4:\n if self.data_format == \"channels_last\":\n input_shape[1] = self.height\n input_shape[2] = self.width\n else:\n input_shape[2] = self.height\n input_shape[3] = self.width\n else:\n if self.data_format == \"channels_last\":\n input_shape[0] = self.height\n input_shape[1] = self.width\n else:\n input_shape[1] = self.height\n input_shape[2] = self.width\n return tuple(input_shape)\n\n def get_config(self):\n base_config = super().get_config()\n config = {\n \"height\": self.height,\n \"width\": self.width,\n \"data_format\": self.data_format,\n }\n return {**base_config, **config}\n",
"path": "keras_core/layers/preprocessing/center_crop.py"
}
] | diff --git a/keras_core/layers/preprocessing/center_crop.py b/keras_core/layers/preprocessing/center_crop.py
index c1caaf1f3..de3c5fee9 100644
--- a/keras_core/layers/preprocessing/center_crop.py
+++ b/keras_core/layers/preprocessing/center_crop.py
@@ -52,6 +52,7 @@ def __init__(self, height, width, data_format=None, **kwargs):
self.data_format = backend.standardize_data_format(data_format)
def call(self, inputs):
+ inputs = self.backend.cast(inputs, self.compute_dtype)
if self.data_format == "channels_first":
init_height = inputs.shape[-2]
init_width = inputs.shape[-1]
diff --git a/keras_core/layers/preprocessing/center_crop_test.py b/keras_core/layers/preprocessing/center_crop_test.py
index 2da851b53..52e56d7d5 100644
--- a/keras_core/layers/preprocessing/center_crop_test.py
+++ b/keras_core/layers/preprocessing/center_crop_test.py
@@ -102,3 +102,13 @@ def test_tf_data_compatibility(self):
for output in ds.take(1):
output = output.numpy()
self.assertEqual(list(output.shape), [2, 8, 9, 3])
+
+ def test_list_compatibility(self):
+ images = [
+ np.random.rand(10, 10, 3),
+ np.random.rand(10, 10, 3),
+ ]
+ output = layers.CenterCrop(height=6, width=5)(images)
+ ref_output = tf.keras.layers.CenterCrop(6, 5)(images)
+ self.assertListEqual(list(output.shape), [2, 6, 5, 3])
+ self.assertAllClose(ref_output, output)
| keras.layers.CenterCrop raises AttributeError when passed a list of images
With tf.keras, the following code works, but not in Keras-Core:
```python
import keras_core as keras
import numpy as np
images = [
np.random.rand(100, 100, 3),
np.random.rand(100, 100, 3),
]
keras.layers.CenterCrop(height=60, width=50)(images) #!!! AttributeError
```
Full stacktrace below:
<details>
```
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
Cell In[21], line 8
2 import numpy as np
4 images = [
5 np.random.rand(100, 100, 3),
6 np.random.rand(100, 100, 3),
7 ]
----> 8 keras.layers.CenterCrop(height=60, width=50)(images)
File ~/opt/miniconda3/envs/kerascore/lib/python3.8/site-packages/keras_core/src/layers/preprocessing/tf_data_layer.py:36, in TFDataLayer.__call__(self, inputs, **kwargs)
34 self._convert_input_args = True
35 return outputs
---> 36 return super().__call__(inputs, **kwargs)
File ~/opt/miniconda3/envs/kerascore/lib/python3.8/site-packages/keras_core/src/utils/traceback_utils.py:122, in filter_traceback.<locals>.error_handler(*args, **kwargs)
119 filtered_tb = _process_traceback_frames(e.__traceback__)
120 # To get the full stack trace, call:
121 # `keras_core.config.disable_traceback_filtering()`
--> 122 raise e.with_traceback(filtered_tb) from None
123 finally:
124 del filtered_tb
File ~/opt/miniconda3/envs/kerascore/lib/python3.8/site-packages/keras_core/src/layers/preprocessing/center_crop.py:59, in CenterCrop.call(self, inputs)
57 init_width = inputs.shape[-1]
58 else:
---> 59 init_height = inputs.shape[-3]
60 init_width = inputs.shape[-2]
62 if init_height is None or init_width is None:
63 # Dynamic size case. TODO.
AttributeError: Exception encountered when calling CenterCrop.call().
'list' object has no attribute 'shape'
Arguments received by CenterCrop.call():
• inputs=['jnp.ndarray(shape=(100, 100, 3), dtype=float32)', 'jnp.ndarray(shape=(100, 100, 3), dtype=float32)']
```
</details>
A simple workaround is to stack the images:
```python
keras.layers.CenterCrop(height=60, width=50)(np.stack(images))
```
Not sure this can be considered a bug, but it's one of those little differences that may make porting code from Keras 2.x to 3.0 a bit harder.
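As a further illustration (a minimal NumPy sketch using the same hypothetical `images` list; this is not Keras internals), the error comes down to a plain Python list having no `.shape`, which stacking — or the backend cast added in the fix — provides:
```python
import numpy as np

images = [
    np.random.rand(100, 100, 3),
    np.random.rand(100, 100, 3),
]

# A plain list has no `.shape`, which is what raises the AttributeError.
# Converting it to a single array first gives the layer static spatial dims.
batch = np.stack(images)                 # shape: (2, 100, 100, 3)
init_height, init_width = batch.shape[-3], batch.shape[-2]
print(init_height, init_width)           # 100 100
```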
|
streamlit__streamlit-6828 | [
{
"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pandas as pd\n\nimport streamlit as st\nfrom streamlit import runtime\n\nw1 = st.select_slider(\n \"Label 1\",\n value=(\"orange\", \"blue\"),\n options=[\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"indigo\", \"violet\"],\n)\nst.write(\"Value 1:\", w1)\n\nw2 = st.select_slider(\n \"Label 2\",\n options=np.array([1, 2, 3, 4, 5]),\n)\nst.write(\"Value 2:\", w2)\n\nw3 = st.select_slider(\n \"Label 3\",\n value=[2, 5],\n options=pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9]),\n)\nst.write(\"Value 3:\", w3)\n\nw4 = st.select_slider(\n \"Label 4\",\n value=5,\n options=pd.DataFrame(\n {\n \"first column\": [1, 2, 3, 4, 5],\n \"second column\": [10, 20, 30, 40, 50],\n }\n ),\n)\nst.write(\"Value 4:\", w4)\n\nw5 = st.select_slider(\n \"Label 5\",\n value=(\"orange\", \"blue\"),\n options=[\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"indigo\", \"violet\"],\n disabled=True,\n)\nst.write(\"Value 5:\", w5)\n\nw6 = st.select_slider(\n \"Label 6\",\n options=[\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"indigo\", \"violet\"],\n label_visibility=\"hidden\",\n)\n\nst.write(\"Value 6:\", w6)\n\n\nw7 = st.select_slider(\n \"Label 7\",\n options=[\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"indigo\", \"violet\"],\n label_visibility=\"collapsed\",\n)\n\nst.write(\"Value 7:\", w7)\n\nif runtime.exists():\n\n def on_change():\n st.session_state.select_slider_changed = True\n\n st.select_slider(\n \"Label 8\",\n options=np.array([1, 2, 3, 4, 5]),\n key=\"select_slider8\",\n on_change=on_change,\n )\n st.write(\"Value 8:\", st.session_state.select_slider8)\n st.write(\"Select slider changed:\", \"select_slider_changed\" in st.session_state)\n",
"path": "e2e/scripts/st_select_slider.py"
}
] | [
{
"content": "# Copyright (c) Streamlit Inc. (2018-2022) Snowflake Inc. (2022)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nimport pandas as pd\n\nimport streamlit as st\nfrom streamlit import runtime\n\nw1 = st.select_slider(\n \"Label 1\",\n value=(\"orange\", \"blue\"),\n options=[\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"indigo\", \"violet\"],\n)\nst.write(\"Value 1:\", w1)\n\nw2 = st.select_slider(\n \"Label 2\",\n options=np.array([1, 2, 3, 4, 5]),\n)\nst.write(\"Value 2:\", w2)\n\nw3 = st.select_slider(\n \"Label 3\",\n value=[2, 5],\n options=pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9]),\n)\nst.write(\"Value 3:\", w3)\n\nw4 = st.select_slider(\n \"Label 4\",\n value=5,\n options=pd.DataFrame(\n {\n \"first column\": [1, 2, 3, 4, 5],\n \"second column\": [10, 20, 30, 40, 50],\n }\n ),\n)\nst.write(\"Value 4:\", w4)\n\nw5 = st.select_slider(\n \"Label 5\",\n value=(\"orange\", \"blue\"),\n options=[\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"indigo\", \"violet\"],\n disabled=True,\n)\nst.write(\"Value 5:\", w5)\n\nw6 = st.select_slider(\n \"Label 6\",\n options=[\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"indigo\", \"violet\"],\n label_visibility=\"hidden\",\n)\n\nst.write(\"Value 6:\", w6)\n\n\nw7 = st.select_slider(\n \"Label 7\",\n options=[\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"indigo\", \"violet\"],\n label_visibility=\"collapsed\",\n)\n\nst.write(\"Value 7:\", w7)\n\nif runtime.exists():\n\n def on_change():\n st.session_state.select_slider_changed = True\n\n st.select_slider(\n \"Label 8\",\n options=np.array([1, 2, 3, 4, 5]),\n key=\"select_slider8\",\n on_change=on_change,\n )\n st.write(\"Value 8:\", st.session_state.select_slider8)\n st.write(\"Select slider changed:\", \"select_slider_changed\" in st.session_state)\n\nwith st.expander(\"Expander\", expanded=True):\n w9 = st.select_slider(\n label=\"Label 9\",\n options=[\"foo\", \"bar\", \"baz\", \"This is a very, very long option\"],\n value=\"This is a very, very long option\",\n )\n\n st.write(\"Value 9:\", w9)\n",
"path": "e2e/scripts/st_select_slider.py"
}
] | diff --git a/e2e/scripts/st_select_slider.py b/e2e/scripts/st_select_slider.py
index 403fe937f67a..b4c0d0f8ed4d 100644
--- a/e2e/scripts/st_select_slider.py
+++ b/e2e/scripts/st_select_slider.py
@@ -88,3 +88,12 @@ def on_change():
)
st.write("Value 8:", st.session_state.select_slider8)
st.write("Select slider changed:", "select_slider_changed" in st.session_state)
+
+with st.expander("Expander", expanded=True):
+ w9 = st.select_slider(
+ label="Label 9",
+ options=["foo", "bar", "baz", "This is a very, very long option"],
+ value="This is a very, very long option",
+ )
+
+ st.write("Value 9:", w9)
diff --git a/e2e/specs/st_select_slider.spec.js b/e2e/specs/st_select_slider.spec.js
index 581a53ed45a9..3c6bc01515ae 100644
--- a/e2e/specs/st_select_slider.spec.js
+++ b/e2e/specs/st_select_slider.spec.js
@@ -21,7 +21,7 @@ describe("st.select_slider", () => {
});
it("displays correct number of elements", () => {
- cy.get(".element-container .stSlider").should("have.length", 8);
+ cy.get(".element-container .stSlider").should("have.length", 9);
});
it("looks right when disabled", () => {
@@ -166,4 +166,16 @@ describe("st.select_slider", () => {
"Value 8: 2" + "Select slider changed: True"
);
});
+
+ it("realigns label values when expander re-opened", () => {
+ // Closes the expander
+ cy.get(".streamlit-expanderHeader").click();
+
+ // Reopens the expander
+ cy.get(".streamlit-expanderHeader").click();
+
+ // Positioning error occurs on overflow of expander container
+ // which occurs when position left set to 0px
+ cy.getIndexed(".StyledThumbValue", 11).should("not.have.css", "left", "0px")
+ });
});
diff --git a/e2e/specs/st_slider.spec.js b/e2e/specs/st_slider.spec.js
index 5ef4116d778a..352f0dcd8471 100644
--- a/e2e/specs/st_slider.spec.js
+++ b/e2e/specs/st_slider.spec.js
@@ -43,16 +43,16 @@ describe("st.slider", () => {
cy.get(".stSlider label").should(
"have.text",
"Label A" +
- "Range A" +
- "Label B" +
- "Range B" +
- "Label 1" +
- "Label 2" +
- "Label 3 - This is a very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very long label" +
- "Label 4" +
- "Label 5" +
- "Label 6" +
- "Label 7"
+ "Range A" +
+ "Label B" +
+ "Range B" +
+ "Label 1" +
+ "Label 2" +
+ "Label 3 - This is a very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very very long label" +
+ "Label 4" +
+ "Label 5" +
+ "Label 6" +
+ "Label 7"
);
});
@@ -82,21 +82,33 @@ describe("st.slider", () => {
);
});
+ it("realigns label values when expander re-opened", () => {
+ // Closes the expander
+ cy.get(".streamlit-expanderHeader").click();
+
+ // Reopens the expander
+ cy.get(".streamlit-expanderHeader").click();
+
+ // Positioning error occurs on overflow of expander container
+ // which occurs when position left set to 0px
+ cy.getIndexed(".StyledThumbValue", 5).should("not.have.css", "left", "0px")
+ });
+
it("has correct values", () => {
cy.get(".stMarkdown").should(
"have.text",
"Value A: 12345678" +
- "Range Value A: (10000, 25000)" +
- "Value B: 10000" +
- "Range Value B: (10000, 25000)" +
- "Value 1: 25" +
- "Value 2: (25.0, 75.0)" +
- "Value 3: 1" +
- "Value 4: 10000" +
- "Value 5: 25" +
- "Value 6: 36" +
- "Value 7: 25" +
- "Slider changed: False"
+ "Range Value A: (10000, 25000)" +
+ "Value B: 10000" +
+ "Range Value B: (10000, 25000)" +
+ "Value 1: 25" +
+ "Value 2: (25.0, 75.0)" +
+ "Value 3: 1" +
+ "Value 4: 10000" +
+ "Value 5: 25" +
+ "Value 6: 36" +
+ "Value 7: 25" +
+ "Slider changed: False"
);
});
diff --git a/frontend/src/lib/components/widgets/Slider/Slider.tsx b/frontend/src/lib/components/widgets/Slider/Slider.tsx
index dbbdc4114be4..98490f9fd892 100644
--- a/frontend/src/lib/components/widgets/Slider/Slider.tsx
+++ b/frontend/src/lib/components/widgets/Slider/Slider.tsx
@@ -92,10 +92,13 @@ class Slider extends React.PureComponent<Props, State> {
i < Math.min(this.thumbRef.length, this.thumbValueRef.length);
i++
) {
- this.thumbValueAlignment(
- this.thumbRef[i].current,
- this.thumbValueRef[i].current
- )
+ // Delay the alignment to allow the page layout to complete
+ setTimeout(() => {
+ this.thumbValueAlignment(
+ this.thumbRef[i].current,
+ this.thumbValueRef[i].current
+ )
+ }, 0)
}
if (this.props.element.setValue) {
| Right-side label of `st.slider` and `st.select_slider` overflows when inside `st.expander`
### Checklist
- [x] I have searched the [existing issues](https://github.com/streamlit/streamlit/issues) for similar issues.
- [X] I added a very descriptive title to this issue.
- [X] I have provided sufficient information below to help reproduce this issue.
### Summary
Right-side label of `st.slider` and `st.select_slider` overflows when inside `st.expander`.
In the past I submitted a similar issue for the left-side label (see https://github.com/streamlit/streamlit/issues/5898); now it is the right-side label that is misbehaving.
### Reproducible Code Example
[](https://issues.streamlitapp.com/?issue=gh-6297)
```Python
import streamlit as st
st.title("Right-side label of slider and select_slider overflows when inside expander")
with st.expander('Example st.expander'):
single_value = st.slider(
label='Example st.slider',
min_value=9_500_000,
max_value=10_000_000,
value=10_000_000
)
first_value,last_value = st.slider(
label='Example st.slider (range mode)',
min_value=9_500_000,
max_value=10_000_000,
value=(9_500_000,10_000_000)
)
single_value = st.select_slider(
label='Example st.select_slider',
options=['Maradona','Ronaldo','Pele','This is a very, very long label'],
value='This is a very, very long label'
)
first_value,last_value = st.select_slider(
label='Example st.select_slider (range mode)',
options=['Maradona','Ronaldo','Pele','This is a very, very long label'],
value=['Maradona','This is a very, very long label']
)
```
### Steps To Reproduce
1. Run the reproducible code example
2. Open the expander and see that all the right-side labels are overflowing
### Expected Behavior
Labels should not overflow beyond the widgets width.
### Current Behavior
_No response_
### Is this a regression?
- [X] Yes, this used to work in a previous version.
### Debug info
- Streamlit version: 1.20.0
- Python version: 3.11
- Operating System: macOS
- Browser: Brave
- Virtual environment: conda
### Additional Information
_No response_
### Are you willing to submit a PR?
- [ ] Yes, I am willing to submit a PR!
|
apluslms__a-plus-1310 | [
{
"content": "import logging\nfrom typing import Any, Dict, List\n\nfrom django import forms\nfrom django.utils.translation import gettext_lazy as _\n\nfrom course.models import CourseModule, LearningObjectCategory\nfrom exercise.models import LearningObject, CourseChapter, BaseExercise, \\\n LTIExercise, StaticExercise, ExerciseWithAttachment, RevealRule, \\\n LTI1p3Exercise\nfrom lib.widgets import DateTimeLocalInput\nfrom .course_forms import FieldsetModelForm\n\nfrom exercise.exercisecollection_models import ExerciseCollection\n\nlogger = logging.getLogger(\"aplus.exercise\")\n\nCOMMON_FIELDS = [\n 'status',\n 'audience',\n 'category',\n 'course_module',\n 'parent',\n 'order',\n 'url',\n]\nSERVICE_FIELDS = [\n 'service_url',\n 'name',\n 'description',\n]\nEXERCISE_FIELDS = [\n 'max_submissions',\n 'max_points',\n 'difficulty',\n 'points_to_pass',\n 'allow_assistant_viewing',\n 'allow_assistant_grading',\n 'min_group_size',\n 'max_group_size',\n 'model_answers',\n 'templates',\n 'grading_mode',\n]\n\n\nclass LearningObjectMixin:\n\n def init_fields(self, **kwargs):\n self.lobject = kwargs.get('instance')\n self.fields[\"category\"].queryset = LearningObjectCategory.objects.filter(\n course_instance=self.lobject.course_instance)\n self.fields[\"course_module\"].queryset = CourseModule.objects.filter(\n course_instance=self.lobject.course_instance)\n self.fields[\"parent\"].queryset = LearningObject.objects\\\n .exclude(id=self.lobject.id)\\\n .filter(course_module=self.lobject.course_module)\n\n @property\n def remote_service_head(self):\n return True\n\n def get_hierarchy_fieldset(self):\n return { 'legend':_('HIERARCHY'), 'fields':self.get_fields('status',\n 'audience', 'category','course_module','parent','order','url') }\n\n def get_content_fieldset(self, *add):\n return { 'legend':_('CONTENT'), 'fields':self.get_fields('name',\n 'description', *add) }\n\n\nclass CourseChapterForm(LearningObjectMixin, FieldsetModelForm):\n\n class Meta:\n model = CourseChapter\n fields = COMMON_FIELDS + SERVICE_FIELDS + [\n 'use_wide_column',\n 'generate_table_of_contents'\n ]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.init_fields(**kwargs)\n\n def get_fieldsets(self):\n return [\n self.get_hierarchy_fieldset(),\n self.get_content_fieldset(\n 'use_wide_column', 'generate_table_of_contents'),\n ]\n\n\nclass RevealRuleForm(FieldsetModelForm):\n # This form is only used internally by BaseExerciseForm.\n\n class Meta:\n model = RevealRule\n fields = ['trigger', 'delay_minutes', 'time', 'currently_revealed']\n widgets = {'time': DateTimeLocalInput}\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super().__init__(*args, **kwargs)\n self.fields['trigger'].widget.attrs['data-trigger'] = True\n # Visibility rules for the form fields. Each of the following fields is\n # only visible when one of their specified values is selected from the\n # trigger dropdown. 
See edit_model.html.\n self.fields['currently_revealed'].widget.attrs['data-visible-triggers'] = [\n RevealRule.TRIGGER.MANUAL.value,\n ]\n self.fields['time'].widget.attrs['data-visible-triggers'] = [\n RevealRule.TRIGGER.TIME.value,\n ]\n self.fields['delay_minutes'].widget.attrs['data-visible-triggers'] = [\n RevealRule.TRIGGER.DEADLINE.value,\n RevealRule.TRIGGER.DEADLINE_ALL.value,\n RevealRule.TRIGGER.DEADLINE_OR_FULL_POINTS.value,\n ]\n\n def clean(self) -> Dict[str, Any]:\n result = super().clean()\n errors = {}\n trigger = self.cleaned_data.get('trigger')\n if trigger == RevealRule.TRIGGER.TIME:\n time = self.cleaned_data.get('time')\n if time is None:\n errors['time'] = _(\n 'ERROR_REQUIRED_WITH_SELECTED_TRIGGER'\n )\n if errors:\n raise forms.ValidationError(errors)\n return result\n\n\nclass BaseExerciseForm(LearningObjectMixin, FieldsetModelForm):\n\n class Meta:\n model = BaseExercise\n fields = COMMON_FIELDS + SERVICE_FIELDS + EXERCISE_FIELDS\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super().__init__(*args, **kwargs)\n self.init_fields(**kwargs)\n\n # This form contains two embedded RevealRuleForms.\n self.submission_feedback_form = RevealRuleForm(\n data=kwargs.get('data'),\n instance=self.instance.active_submission_feedback_reveal_rule,\n prefix='submission_feedback',\n )\n self.model_solutions_form = RevealRuleForm(\n data=kwargs.get('data'),\n instance=self.instance.active_model_solutions_reveal_rule,\n prefix='model_solutions',\n )\n\n def get_fieldsets(self) -> List[Dict[str, Any]]:\n return [\n self.get_hierarchy_fieldset(),\n self.get_content_fieldset('model_answers', 'templates'),\n { 'legend':_('GRADING'), 'fields':self.get_fields('max_submissions',\n 'max_points','points_to_pass', 'difficulty',\n 'allow_assistant_viewing','allow_assistant_grading','grading_mode') },\n { 'legend':_('GROUPS'), 'fields':self.get_fields('min_group_size',\n 'max_group_size') },\n { 'legend':_('REVEAL_SUBMISSION_FEEDBACK'), 'fields':self.submission_feedback_form },\n { 'legend':_('REVEAL_MODEL_SOLUTIONS'), 'fields':self.model_solutions_form },\n ]\n\n def is_valid(self) -> bool:\n return (\n super().is_valid()\n and self.submission_feedback_form.is_valid()\n and self.model_solutions_form.is_valid()\n )\n\n def save(self, *args: Any, **kwargs: Any) -> Any:\n # Save the reveal rules only if they have been changed.\n # If they were not changed, we can keep using the default rule and\n # there's no need to save a new RevealRule.\n if self.submission_feedback_form.has_changed():\n self.instance.submission_feedback_reveal_rule = (\n self.submission_feedback_form.save(*args, **kwargs)\n )\n if self.model_solutions_form.has_changed():\n self.instance.model_solutions_reveal_rule = (\n self.model_solutions_form.save(*args, **kwargs)\n )\n return super().save(*args, **kwargs)\n\n\nclass LTIExerciseForm(BaseExerciseForm):\n\n class Meta:\n model = LTIExercise\n fields = COMMON_FIELDS + SERVICE_FIELDS + EXERCISE_FIELDS + [\n 'lti_service',\n 'context_id',\n 'resource_link_id',\n 'resource_link_title',\n 'aplus_get_and_post',\n 'open_in_iframe',\n ]\n\n @property\n def remote_service_head(self):\n return False\n\n def get_content_fieldset(self, *add):\n return super().get_content_fieldset('lti_service','context_id',\n 'resource_link_id','resource_link_title',\n 'aplus_get_and_post','open_in_iframe','service_url')\n\n\nclass LTI1p3ExerciseForm(BaseExerciseForm):\n\n class Meta:\n model = LTI1p3Exercise\n fields = COMMON_FIELDS + SERVICE_FIELDS + EXERCISE_FIELDS + [\n 
'lti_service',\n 'custom',\n 'open_in_iframe',\n ]\n\n @property\n def remote_service_head(self) -> bool:\n return False\n\n def get_content_fieldset(self, *add) -> Dict[str, Any]:\n return super().get_content_fieldset('lti_service', 'custom', 'open_in_iframe')\n\n\nclass ExerciseWithAttachmentForm(BaseExerciseForm):\n multipart = True\n\n class Meta:\n model = ExerciseWithAttachment\n fields = COMMON_FIELDS + SERVICE_FIELDS + EXERCISE_FIELDS + [\n 'content',\n 'files_to_submit',\n 'attachment',\n ]\n\n def get_content_fieldset(self, *add):\n return super().get_content_fieldset(\n 'content', 'files_to_submit', 'attachment')\n\n\nclass StaticExerciseForm(BaseExerciseForm):\n\n class Meta:\n model = StaticExercise\n fields = COMMON_FIELDS + EXERCISE_FIELDS + [\n 'name',\n 'description',\n 'exercise_page_content',\n 'submission_page_content',\n ]\n\n @property\n def remote_service_head(self):\n return False\n\n def get_content_fieldset(self, *add):\n return super().get_content_fieldset(\n 'exercise_page_content', 'submission_page_content')\n\nclass ExerciseCollectionExerciseForm(BaseExerciseForm):\n\n class Meta:\n model = ExerciseCollection\n fields = COMMON_FIELDS + EXERCISE_FIELDS + SERVICE_FIELDS + \\\n ['target_category']\n\n def get_content_fieldset(self, *add):\n return super().get_content_fieldset('target_category')\n",
"path": "edit_course/exercise_forms.py"
}
] | [
{
"content": "import logging\nfrom typing import Any, Dict, List\n\nfrom django import forms\nfrom django.utils.translation import gettext_lazy as _\n\nfrom course.models import CourseModule, LearningObjectCategory\nfrom exercise.models import LearningObject, CourseChapter, BaseExercise, \\\n LTIExercise, StaticExercise, ExerciseWithAttachment, RevealRule, \\\n LTI1p3Exercise\nfrom lib.widgets import DateTimeLocalInput\nfrom .course_forms import FieldsetModelForm\n\nfrom exercise.exercisecollection_models import ExerciseCollection\n\nlogger = logging.getLogger(\"aplus.exercise\")\n\nCOMMON_FIELDS = [\n 'status',\n 'audience',\n 'category',\n 'course_module',\n 'parent',\n 'order',\n 'url',\n]\nSERVICE_FIELDS = [\n 'service_url',\n 'name',\n 'description',\n]\nEXERCISE_FIELDS = [\n 'max_submissions',\n 'max_points',\n 'difficulty',\n 'points_to_pass',\n 'allow_assistant_viewing',\n 'allow_assistant_grading',\n 'min_group_size',\n 'max_group_size',\n 'model_answers',\n 'templates',\n 'grading_mode',\n]\n\n\nclass LearningObjectMixin:\n\n def init_fields(self, **kwargs):\n self.lobject = kwargs.get('instance')\n self.fields[\"category\"].queryset = LearningObjectCategory.objects.filter(\n course_instance=self.lobject.course_instance)\n self.fields[\"course_module\"].queryset = CourseModule.objects.filter(\n course_instance=self.lobject.course_instance)\n self.fields[\"parent\"].queryset = LearningObject.objects\\\n .exclude(id=self.lobject.id)\\\n .filter(course_module=self.lobject.course_module)\n self.fields['parent'].widget.attrs.update(\n {'readonly': True, 'disabled': True})\n\n @property\n def remote_service_head(self):\n return True\n\n def get_hierarchy_fieldset(self):\n return { 'legend':_('HIERARCHY'), 'fields':self.get_fields('status',\n 'audience', 'category','course_module','parent','order','url') }\n\n def get_content_fieldset(self, *add):\n return { 'legend':_('CONTENT'), 'fields':self.get_fields('name',\n 'description', *add) }\n\n\nclass CourseChapterForm(LearningObjectMixin, FieldsetModelForm):\n\n class Meta:\n model = CourseChapter\n fields = COMMON_FIELDS + SERVICE_FIELDS + [\n 'use_wide_column',\n 'generate_table_of_contents'\n ]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.init_fields(**kwargs)\n\n def get_fieldsets(self):\n return [\n self.get_hierarchy_fieldset(),\n self.get_content_fieldset(\n 'use_wide_column', 'generate_table_of_contents'),\n ]\n\n\nclass RevealRuleForm(FieldsetModelForm):\n # This form is only used internally by BaseExerciseForm.\n\n class Meta:\n model = RevealRule\n fields = ['trigger', 'delay_minutes', 'time', 'currently_revealed']\n widgets = {'time': DateTimeLocalInput}\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super().__init__(*args, **kwargs)\n self.fields['trigger'].widget.attrs['data-trigger'] = True\n # Visibility rules for the form fields. Each of the following fields is\n # only visible when one of their specified values is selected from the\n # trigger dropdown. 
See edit_model.html.\n self.fields['currently_revealed'].widget.attrs['data-visible-triggers'] = [\n RevealRule.TRIGGER.MANUAL.value,\n ]\n self.fields['time'].widget.attrs['data-visible-triggers'] = [\n RevealRule.TRIGGER.TIME.value,\n ]\n self.fields['delay_minutes'].widget.attrs['data-visible-triggers'] = [\n RevealRule.TRIGGER.DEADLINE.value,\n RevealRule.TRIGGER.DEADLINE_ALL.value,\n RevealRule.TRIGGER.DEADLINE_OR_FULL_POINTS.value,\n ]\n\n def clean(self) -> Dict[str, Any]:\n result = super().clean()\n errors = {}\n trigger = self.cleaned_data.get('trigger')\n if trigger == RevealRule.TRIGGER.TIME:\n time = self.cleaned_data.get('time')\n if time is None:\n errors['time'] = _(\n 'ERROR_REQUIRED_WITH_SELECTED_TRIGGER'\n )\n if errors:\n raise forms.ValidationError(errors)\n return result\n\n\nclass BaseExerciseForm(LearningObjectMixin, FieldsetModelForm):\n\n class Meta:\n model = BaseExercise\n fields = COMMON_FIELDS + SERVICE_FIELDS + EXERCISE_FIELDS\n\n def __init__(self, *args: Any, **kwargs: Any) -> None:\n super().__init__(*args, **kwargs)\n self.init_fields(**kwargs)\n\n # This form contains two embedded RevealRuleForms.\n self.submission_feedback_form = RevealRuleForm(\n data=kwargs.get('data'),\n instance=self.instance.active_submission_feedback_reveal_rule,\n prefix='submission_feedback',\n )\n self.model_solutions_form = RevealRuleForm(\n data=kwargs.get('data'),\n instance=self.instance.active_model_solutions_reveal_rule,\n prefix='model_solutions',\n )\n\n def get_fieldsets(self) -> List[Dict[str, Any]]:\n return [\n self.get_hierarchy_fieldset(),\n self.get_content_fieldset('model_answers', 'templates'),\n { 'legend':_('GRADING'), 'fields':self.get_fields('max_submissions',\n 'max_points','points_to_pass', 'difficulty',\n 'allow_assistant_viewing','allow_assistant_grading','grading_mode') },\n { 'legend':_('GROUPS'), 'fields':self.get_fields('min_group_size',\n 'max_group_size') },\n { 'legend':_('REVEAL_SUBMISSION_FEEDBACK'), 'fields':self.submission_feedback_form },\n { 'legend':_('REVEAL_MODEL_SOLUTIONS'), 'fields':self.model_solutions_form },\n ]\n\n def is_valid(self) -> bool:\n return (\n super().is_valid()\n and self.submission_feedback_form.is_valid()\n and self.model_solutions_form.is_valid()\n )\n\n def save(self, *args: Any, **kwargs: Any) -> Any:\n # Save the reveal rules only if they have been changed.\n # If they were not changed, we can keep using the default rule and\n # there's no need to save a new RevealRule.\n if self.submission_feedback_form.has_changed():\n self.instance.submission_feedback_reveal_rule = (\n self.submission_feedback_form.save(*args, **kwargs)\n )\n if self.model_solutions_form.has_changed():\n self.instance.model_solutions_reveal_rule = (\n self.model_solutions_form.save(*args, **kwargs)\n )\n return super().save(*args, **kwargs)\n\n\nclass LTIExerciseForm(BaseExerciseForm):\n\n class Meta:\n model = LTIExercise\n fields = COMMON_FIELDS + SERVICE_FIELDS + EXERCISE_FIELDS + [\n 'lti_service',\n 'context_id',\n 'resource_link_id',\n 'resource_link_title',\n 'aplus_get_and_post',\n 'open_in_iframe',\n ]\n\n @property\n def remote_service_head(self):\n return False\n\n def get_content_fieldset(self, *add):\n return super().get_content_fieldset('lti_service','context_id',\n 'resource_link_id','resource_link_title',\n 'aplus_get_and_post','open_in_iframe','service_url')\n\n\nclass LTI1p3ExerciseForm(BaseExerciseForm):\n\n class Meta:\n model = LTI1p3Exercise\n fields = COMMON_FIELDS + SERVICE_FIELDS + EXERCISE_FIELDS + [\n 
'lti_service',\n 'custom',\n 'open_in_iframe',\n ]\n\n @property\n def remote_service_head(self) -> bool:\n return False\n\n def get_content_fieldset(self, *add) -> Dict[str, Any]:\n return super().get_content_fieldset('lti_service', 'custom', 'open_in_iframe')\n\n\nclass ExerciseWithAttachmentForm(BaseExerciseForm):\n multipart = True\n\n class Meta:\n model = ExerciseWithAttachment\n fields = COMMON_FIELDS + SERVICE_FIELDS + EXERCISE_FIELDS + [\n 'content',\n 'files_to_submit',\n 'attachment',\n ]\n\n def get_content_fieldset(self, *add):\n return super().get_content_fieldset(\n 'content', 'files_to_submit', 'attachment')\n\n\nclass StaticExerciseForm(BaseExerciseForm):\n\n class Meta:\n model = StaticExercise\n fields = COMMON_FIELDS + EXERCISE_FIELDS + [\n 'name',\n 'description',\n 'exercise_page_content',\n 'submission_page_content',\n ]\n\n @property\n def remote_service_head(self):\n return False\n\n def get_content_fieldset(self, *add):\n return super().get_content_fieldset(\n 'exercise_page_content', 'submission_page_content')\n\nclass ExerciseCollectionExerciseForm(BaseExerciseForm):\n\n class Meta:\n model = ExerciseCollection\n fields = COMMON_FIELDS + EXERCISE_FIELDS + SERVICE_FIELDS + \\\n ['target_category']\n\n def get_content_fieldset(self, *add):\n return super().get_content_fieldset('target_category')\n",
"path": "edit_course/exercise_forms.py"
}
] | diff --git a/edit_course/exercise_forms.py b/edit_course/exercise_forms.py
index adb5ac12d..d40762edc 100644
--- a/edit_course/exercise_forms.py
+++ b/edit_course/exercise_forms.py
@@ -55,6 +55,8 @@ def init_fields(self, **kwargs):
self.fields["parent"].queryset = LearningObject.objects\
.exclude(id=self.lobject.id)\
.filter(course_module=self.lobject.course_module)
+ self.fields['parent'].widget.attrs.update(
+ {'readonly': True, 'disabled': True})
@property
def remote_service_head(self):
| Users should not be able to edit content hierarchy
Typically the A+ course content structure is configured from a JSON file generated by gitmanager. However, teachers also have the possibility to edit the attributes in the Edit course / Content view, including the parent learning object. By editing the parent selection it is possible to create a circular reference loop between two learning objects pointing to each other as a parent, which leads to an excessive number of database operations and, as a result, makes the system unusable due to heavy database load.
An easy approach would be to simply disable the possibility of modifying the parent selection. Later, it might be useful to think more thoroughly about how important it is to allow editing the content structure in this view, given that the main form of course configuration should be through the JSON configuration.
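A minimal Django sketch of the "disable the parent selection" approach (the form and field below are illustrative stand-ins, not the actual A+ classes):
```python
from django import forms


class HypotheticalLearningObjectForm(forms.Form):
    # Illustrative stand-in for the real parent selector field.
    parent = forms.ChoiceField(choices=[("", "---")], required=False)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # A disabled field is rendered as non-editable, and Django ignores
        # any submitted value for it, so the hierarchy cannot be changed here.
        self.fields["parent"].disabled = True
```
Using `disabled` (rather than only marking the widget read-only in HTML) also means a tampered POST value is ignored server-side.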
|
conan-io__conan-center-index-7774 | [
{
"content": "import os\n\nfrom conans import ConanFile, tools\nfrom conans.errors import ConanInvalidConfiguration\n\n\nclass SigslotConan(ConanFile):\n name = \"sigslot\"\n description = \"Sigslot is a header-only, thread safe implementation of signal-slots for C++.\"\n topics = (\"signal\", \"slot\", \"c++14\", \"header-only\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/palacaze/sigslot\"\n license = \"MIT\"\n settings = \"compiler\", \"os\"\n no_copy_source = True\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def configure(self):\n minimal_cpp_standard = \"14\"\n if self.settings.compiler.cppstd:\n tools.check_min_cppstd(self, minimal_cpp_standard)\n minimal_version = {\n \"gcc\": \"5\",\n \"clang\": \"3.4\",\n \"apple-clang\": \"10\",\n \"Visual Studio\": \"15\" # 14 is not supported by the library\n }\n compiler = str(self.settings.compiler)\n if compiler not in minimal_version:\n self.output.warn(\n \"%s recipe lacks information about the %s compiler standard version support\" % (self.name, compiler))\n self.output.warn(\n \"%s requires a compiler that supports at least C++%s\" % (self.name, minimal_cpp_standard))\n return\n version = tools.Version(self.settings.compiler.version)\n if version < minimal_version[compiler]:\n raise ConanInvalidConfiguration(\"%s requires a compiler that supports at least C++%s\" % (self.name, minimal_cpp_standard))\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = \"sigslot-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def package(self):\n self.copy(pattern=\"LICENSE\", src=self._source_subfolder, dst=\"licenses\")\n self.copy(pattern=\"signal.hpp\", src=os.path.join(self._source_subfolder, \"include\", \"sigslot\"), dst=os.path.join(\"include\", \"sigslot\"))\n\n def package_id(self):\n self.info.header_only()\n\n def package_info(self):\n self.cpp_info.filenames[\"cmake_find_package\"] = \"PalSigslot\"\n self.cpp_info.filenames[\"cmake_find_package_multi\"] = \"PalSigslot\"\n self.cpp_info.names[\"cmake_find_package\"] = \"Pal\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"Pal\"\n\n self.cpp_info.components[\"_sigslot\"].libs = []\n self.cpp_info.components[\"_sigslot\"].names[\"cmake_find_package\"] = \"Sigslot\"\n self.cpp_info.components[\"_sigslot\"].names[\"cmake_find_package_multi\"] = \"Sigslot\"\n\n if self.settings.os == \"Linux\":\n self.cpp_info.components[\"_sigslot\"].system_libs.append(\"pthread\")\n if self.settings.os == \"Windows\":\n if self.settings.compiler in (\"Visual Studio\", \"clang\"):\n self.cpp_info.components[\"_sigslot\"].exelinkflags.append('/OPT:NOICF')\n",
"path": "recipes/sigslot/all/conanfile.py"
}
] | [
{
"content": "import os\n\nfrom conans import ConanFile, tools\nfrom conans.errors import ConanInvalidConfiguration\n\n\nclass SigslotConan(ConanFile):\n name = \"sigslot\"\n description = \"Sigslot is a header-only, thread safe implementation of signal-slots for C++.\"\n topics = (\"signal\", \"slot\", \"c++14\", \"header-only\")\n url = \"https://github.com/conan-io/conan-center-index\"\n homepage = \"https://github.com/palacaze/sigslot\"\n license = \"MIT\"\n settings = \"compiler\", \"os\"\n no_copy_source = True\n\n @property\n def _source_subfolder(self):\n return \"source_subfolder\"\n\n def configure(self):\n minimal_cpp_standard = \"14\"\n if self.settings.compiler.cppstd:\n tools.check_min_cppstd(self, minimal_cpp_standard)\n minimal_version = {\n \"gcc\": \"5\",\n \"clang\": \"3.4\",\n \"apple-clang\": \"10\",\n \"Visual Studio\": \"15\" # 14 is not supported by the library\n }\n compiler = str(self.settings.compiler)\n if compiler not in minimal_version:\n self.output.warn(\n \"%s recipe lacks information about the %s compiler standard version support\" % (self.name, compiler))\n self.output.warn(\n \"%s requires a compiler that supports at least C++%s\" % (self.name, minimal_cpp_standard))\n return\n version = tools.Version(self.settings.compiler.version)\n if version < minimal_version[compiler]:\n raise ConanInvalidConfiguration(\"%s requires a compiler that supports at least C++%s\" % (self.name, minimal_cpp_standard))\n\n def source(self):\n tools.get(**self.conan_data[\"sources\"][self.version])\n extracted_dir = \"sigslot-\" + self.version\n os.rename(extracted_dir, self._source_subfolder)\n\n def package(self):\n self.copy(pattern=\"LICENSE\", src=self._source_subfolder, dst=\"licenses\")\n self.copy(pattern=\"signal.hpp\", src=os.path.join(self._source_subfolder, \"include\", \"sigslot\"), dst=os.path.join(\"include\", \"sigslot\"))\n\n def package_id(self):\n self.info.header_only()\n\n def package_info(self):\n self.cpp_info.filenames[\"cmake_find_package\"] = \"PalSigslot\"\n self.cpp_info.filenames[\"cmake_find_package_multi\"] = \"PalSigslot\"\n self.cpp_info.names[\"cmake_find_package\"] = \"Pal\"\n self.cpp_info.names[\"cmake_find_package_multi\"] = \"Pal\"\n\n self.cpp_info.components[\"_sigslot\"].libs = []\n self.cpp_info.components[\"_sigslot\"].names[\"cmake_find_package\"] = \"Sigslot\"\n self.cpp_info.components[\"_sigslot\"].names[\"cmake_find_package_multi\"] = \"Sigslot\"\n\n if self.settings.os == \"Linux\":\n self.cpp_info.components[\"_sigslot\"].system_libs.append(\"pthread\")\n if self.settings.os == \"Windows\":\n if self.settings.compiler in (\"Visual Studio\", \"clang\"):\n self.cpp_info.components[\"_sigslot\"].exelinkflags.append('-OPT:NOICF')\n",
"path": "recipes/sigslot/all/conanfile.py"
}
] | diff --git a/recipes/sigslot/all/conandata.yml b/recipes/sigslot/all/conandata.yml
index b033f4e57ec7d..6dc6431ba8df8 100644
--- a/recipes/sigslot/all/conandata.yml
+++ b/recipes/sigslot/all/conandata.yml
@@ -2,3 +2,6 @@ sources:
"1.2.0":
sha256: "751852BCB1871AA2CA9F30B34614D028BC44379BBD6F91327744724C652E7CE8"
url: "https://github.com/palacaze/sigslot/archive/v1.2.0.tar.gz"
+ "1.2.1":
+ sha256: "180B45E41676A730220E3A9AF6EE71B761F23B8F6ADE73C8F2AA20B677504934"
+ url: "https://github.com/palacaze/sigslot/archive/v1.2.1.tar.gz"
diff --git a/recipes/sigslot/all/conanfile.py b/recipes/sigslot/all/conanfile.py
index f55bcbd2a7f01..176d1b5daec3c 100644
--- a/recipes/sigslot/all/conanfile.py
+++ b/recipes/sigslot/all/conanfile.py
@@ -65,4 +65,4 @@ def package_info(self):
self.cpp_info.components["_sigslot"].system_libs.append("pthread")
if self.settings.os == "Windows":
if self.settings.compiler in ("Visual Studio", "clang"):
- self.cpp_info.components["_sigslot"].exelinkflags.append('/OPT:NOICF')
+ self.cpp_info.components["_sigslot"].exelinkflags.append('-OPT:NOICF')
diff --git a/recipes/sigslot/config.yml b/recipes/sigslot/config.yml
index 7ed1f1b6fc695..307602a029f0f 100644
--- a/recipes/sigslot/config.yml
+++ b/recipes/sigslot/config.yml
@@ -1,3 +1,5 @@
versions:
"1.2.0":
folder: all
+ "1.2.1":
+ folder: all
| [request] sigslot/1.2.1
### Package Details
* Package Name/Version: **sigslot/1.2.1**
* Changelog: **https://github.com/palacaze/sigslot/releases/tag/v1.2.1**
The above-mentioned version was newly released by the upstream project and is not yet available as a recipe. Please add this version.
|
qutip__qutip-1390 | [
{
"content": "# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\nimport numpy as np\nimport scipy.sparse as sp\nimport ctypes\nfrom ctypes import POINTER,c_int,c_char,c_double, byref\nfrom numpy import ctypeslib\nimport qutip.settings as qset\nzcsrgemv = qset.mkl_lib.mkl_cspblas_zcsrgemv\n\ndef mkl_spmv(A, x):\n \"\"\"\n sparse csr_spmv using MKL\n \"\"\"\n (m,n) = A.shape\n\n # Pointers to data of the matrix\n data = A.data.ctypes.data_as(ctypeslib.ndpointer(np.complex128, ndim=1, flags='C'))\n indptr = A.indptr.ctypes.data_as(POINTER(c_int))\n indices = A.indices.ctypes.data_as(POINTER(c_int))\n\n # Allocate output, using same conventions as input\n if x.ndim is 1:\n y = np.empty(m,dtype=np.complex,order='C')\n elif x.ndim==2 and x.shape[1]==1:\n y = np.empty((m,1),dtype=np.complex,order='C')\n else:\n raise Exception('Input vector must be 1D row or 2D column vector')\n \n np_x = x.ctypes.data_as(ctypeslib.ndpointer(np.complex128, ndim=1, flags='C'))\n np_y = y.ctypes.data_as(ctypeslib.ndpointer(np.complex128, ndim=1, flags='C'))\n # now call MKL. This returns the answer in np_y, which points to y\n zcsrgemv(byref(c_char(bytes(b'N'))), byref(c_int(m)), data ,indptr, indices, np_x, np_y ) \n return y\n",
"path": "qutip/_mkl/spmv.py"
}
] | [
{
"content": "# This file is part of QuTiP: Quantum Toolbox in Python.\n#\n# Copyright (c) 2011 and later, Paul D. Nation.\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names\n# of its contributors may be used to endorse or promote products derived\n# from this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A\n# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT\n# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,\n# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT\n# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,\n# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY\n# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n###############################################################################\nimport numpy as np\nimport scipy.sparse as sp\nimport ctypes\nfrom ctypes import POINTER,c_int,c_char,c_double, byref\nfrom numpy import ctypeslib\nimport qutip.settings as qset\nzcsrgemv = qset.mkl_lib.mkl_cspblas_zcsrgemv\n\ndef mkl_spmv(A, x):\n \"\"\"\n sparse csr_spmv using MKL\n \"\"\"\n (m,n) = A.shape\n\n # Pointers to data of the matrix\n data = A.data.ctypes.data_as(ctypeslib.ndpointer(np.complex128, ndim=1, flags='C'))\n indptr = A.indptr.ctypes.data_as(POINTER(c_int))\n indices = A.indices.ctypes.data_as(POINTER(c_int))\n\n # Allocate output, using same conventions as input\n if x.ndim == 1:\n y = np.empty(m,dtype=np.complex,order='C')\n elif x.ndim==2 and x.shape[1]==1:\n y = np.empty((m,1),dtype=np.complex,order='C')\n else:\n raise Exception('Input vector must be 1D row or 2D column vector')\n \n np_x = x.ctypes.data_as(ctypeslib.ndpointer(np.complex128, ndim=1, flags='C'))\n np_y = y.ctypes.data_as(ctypeslib.ndpointer(np.complex128, ndim=1, flags='C'))\n # now call MKL. This returns the answer in np_y, which points to y\n zcsrgemv(byref(c_char(bytes(b'N'))), byref(c_int(m)), data ,indptr, indices, np_x, np_y ) \n return y\n",
"path": "qutip/_mkl/spmv.py"
}
] | diff --git a/qutip/_mkl/spmv.py b/qutip/_mkl/spmv.py
index 66e9a188b3..5774ba6a64 100644
--- a/qutip/_mkl/spmv.py
+++ b/qutip/_mkl/spmv.py
@@ -50,7 +50,7 @@ def mkl_spmv(A, x):
indices = A.indices.ctypes.data_as(POINTER(c_int))
# Allocate output, using same conventions as input
- if x.ndim is 1:
+ if x.ndim == 1:
y = np.empty(m,dtype=np.complex,order='C')
elif x.ndim==2 and x.shape[1]==1:
y = np.empty((m,1),dtype=np.complex,order='C')
| Installation of qutip shows various warnings on Ubuntu 20.04.1
**Describe the bug**
Installing qutip on Ubuntu 20.04.1 shows the following warnings:
```
$ sudo apt install python3-qutip
…
Unpacking python3-qutip (4.4.1-6build1) ...
Setting up python3-qutip (4.4.1-6build1) ...
/usr/lib/python3/dist-packages/qutip/_mkl/spmv.py:53: SyntaxWarning: "is" with a literal. Did you mean "=="?
  if x.ndim is 1:
/usr/lib/python3/dist-packages/qutip/qobjevo.py:776: SyntaxWarning: "is not" with a literal. Did you mean "!="?
  if self.compiled and self.compiled.split()[2] is not "cte":
/usr/lib/python3/dist-packages/qutip/qobjevo.py:1045: SyntaxWarning: "is" with a literal. Did you mean "=="?
  elif op1.type is "array":
/usr/lib/python3/dist-packages/qutip/qobjevo.py:1070: SyntaxWarning: "is" with a literal. Did you mean "=="?
  elif self.ops[_set[0]].type is "string":
/usr/lib/python3/dist-packages/qutip/qobjevo.py:1079: SyntaxWarning: "is" with a literal. Did you mean "=="?
  elif self.ops[_set[0]].type is "array":
/usr/lib/python3/dist-packages/qutip/qobjevo.py:1534: SyntaxWarning: "is not" with a literal. Did you mean "!="?
  for key in self.__dict__ if key is not "compiled_qobjevo"}
```
**To Reproduce**
Install qutip on Ubuntu 20.04.1 via `sudo apt install python3-qutip`.
**Expected behavior**
No warnings during the installation of qutip.
**Your Environment**
```
>>> qutip.about()
QuTiP: Quantum Toolbox in Python
================================
Copyright (c) QuTiP team 2011 and later.
Original developers: R. J. Johansson & P. D. Nation.
Current admin team: Alexander Pitchford, Paul D. Nation, Nathan Shammah, Shahnawaz Ahmed, Neill Lambert, and Eric Giguère.
Project Manager: Franco Nori.
Currently developed through wide collaboration. See https://github.com/qutip for details.
QuTiP Version: 4.4.1
Numpy Version: 1.17.4
Scipy Version: 1.4.1
Cython Version: 0.29.14
Matplotlib Version: 3.1.2
Python Version: 3.8.5
Number of CPUs: 2
BLAS Info: OPENBLAS
OPENMP Installed: True
INTEL MKL Ext: False
Platform Info: Linux (x86_64)
Installation path: /usr/lib/python3/dist-packages/qutip
```
|
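The qutip record above comes down to Python 3.8's SyntaxWarning for identity comparisons against literals: `is` tests object identity, which only happens to hold for small cached integers and interned strings, while the code means a value comparison. A minimal sketch of the distinction follows; the `arr` array is illustrative and not taken from the repository.
```python
# Minimal sketch of the warning fixed above: "is" checks object identity,
# which CPython only happens to satisfy for small cached integers, while
# "==" checks the value and is what the code actually means.
import numpy as np

arr = np.zeros(5)
print(arr.ndim == 1)   # True: value comparison, the intended check

# Writing "arr.ndim is 1" still compiles and runs, but Python 3.8+ emits
#   SyntaxWarning: "is" with a literal. Did you mean "=="?
# because identity of int objects is an implementation detail.
```
The same reasoning applies to the string comparisons flagged in `qobjevo.py`, where `is "array"` and `is not "cte"` should be `==` and `!=`.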
open-telemetry__opentelemetry-python-3848 | [
{
"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom os import getpid\nfrom socket import gethostname\nfrom time import time\n\n# pylint: disable=wrong-import-position\nfrom google.protobuf.timestamp_pb2 import Timestamp\nfrom opencensus.proto.agent.common.v1 import common_pb2\nfrom opencensus.proto.trace.v1 import trace_pb2\n\nfrom opentelemetry.exporter.opencensus.version import (\n __version__ as opencensusexporter_exporter_version,\n)\nfrom opentelemetry.trace import SpanKind\nfrom opentelemetry.util._importlib_metadata import version\n\nOPENTELEMETRY_VERSION = version(\"opentelemetry-api\")\n\n\ndef proto_timestamp_from_time_ns(time_ns):\n \"\"\"Converts datetime to protobuf timestamp.\n\n Args:\n time_ns: Time in nanoseconds\n\n Returns:\n Returns protobuf timestamp.\n \"\"\"\n ts = Timestamp()\n if time_ns is not None:\n # pylint: disable=no-member\n ts.FromNanoseconds(time_ns)\n return ts\n\n\n# pylint: disable=no-member\ndef get_collector_span_kind(kind: SpanKind):\n if kind is SpanKind.SERVER:\n return trace_pb2.Span.SpanKind.SERVER\n if kind is SpanKind.CLIENT:\n return trace_pb2.Span.SpanKind.CLIENT\n return trace_pb2.Span.SpanKind.SPAN_KIND_UNSPECIFIED\n\n\ndef add_proto_attribute_value(pb_attributes, key, value):\n \"\"\"Sets string, int, boolean or float value on protobuf\n span, link or annotation attributes.\n\n Args:\n pb_attributes: protobuf Span's attributes property.\n key: attribute key to set.\n value: attribute value\n \"\"\"\n\n if isinstance(value, bool):\n pb_attributes.attribute_map[key].bool_value = value\n elif isinstance(value, int):\n pb_attributes.attribute_map[key].int_value = value\n elif isinstance(value, str):\n pb_attributes.attribute_map[key].string_value.value = value\n elif isinstance(value, float):\n pb_attributes.attribute_map[key].double_value = value\n else:\n pb_attributes.attribute_map[key].string_value.value = str(value)\n\n\n# pylint: disable=no-member\ndef get_node(service_name, host_name):\n \"\"\"Generates Node message from params and system information.\n\n Args:\n service_name: Name of Collector service.\n host_name: Host name.\n \"\"\"\n return common_pb2.Node(\n identifier=common_pb2.ProcessIdentifier(\n host_name=gethostname() if host_name is None else host_name,\n pid=getpid(),\n start_timestamp=proto_timestamp_from_time_ns(int(time() * 1e9)),\n ),\n library_info=common_pb2.LibraryInfo(\n language=common_pb2.LibraryInfo.Language.Value(\"PYTHON\"),\n exporter_version=opencensusexporter_exporter_version,\n core_library_version=OPENTELEMETRY_VERSION,\n ),\n service_info=common_pb2.ServiceInfo(name=service_name),\n )\n",
"path": "exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/util.py"
}
] | [
{
"content": "# Copyright The OpenTelemetry Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom os import getpid\nfrom socket import gethostname\nfrom time import time\n\n# pylint: disable=wrong-import-position\nfrom google.protobuf.timestamp_pb2 import ( # pylint: disable=no-name-in-module\n Timestamp,\n)\nfrom opencensus.proto.agent.common.v1 import common_pb2\nfrom opencensus.proto.trace.v1 import trace_pb2\n\nfrom opentelemetry.exporter.opencensus.version import (\n __version__ as opencensusexporter_exporter_version,\n)\nfrom opentelemetry.trace import SpanKind\nfrom opentelemetry.util._importlib_metadata import version\n\nOPENTELEMETRY_VERSION = version(\"opentelemetry-api\")\n\n\ndef proto_timestamp_from_time_ns(time_ns):\n \"\"\"Converts datetime to protobuf timestamp.\n\n Args:\n time_ns: Time in nanoseconds\n\n Returns:\n Returns protobuf timestamp.\n \"\"\"\n ts = Timestamp()\n if time_ns is not None:\n # pylint: disable=no-member\n ts.FromNanoseconds(time_ns)\n return ts\n\n\n# pylint: disable=no-member\ndef get_collector_span_kind(kind: SpanKind):\n if kind is SpanKind.SERVER:\n return trace_pb2.Span.SpanKind.SERVER\n if kind is SpanKind.CLIENT:\n return trace_pb2.Span.SpanKind.CLIENT\n return trace_pb2.Span.SpanKind.SPAN_KIND_UNSPECIFIED\n\n\ndef add_proto_attribute_value(pb_attributes, key, value):\n \"\"\"Sets string, int, boolean or float value on protobuf\n span, link or annotation attributes.\n\n Args:\n pb_attributes: protobuf Span's attributes property.\n key: attribute key to set.\n value: attribute value\n \"\"\"\n\n if isinstance(value, bool):\n pb_attributes.attribute_map[key].bool_value = value\n elif isinstance(value, int):\n pb_attributes.attribute_map[key].int_value = value\n elif isinstance(value, str):\n pb_attributes.attribute_map[key].string_value.value = value\n elif isinstance(value, float):\n pb_attributes.attribute_map[key].double_value = value\n else:\n pb_attributes.attribute_map[key].string_value.value = str(value)\n\n\n# pylint: disable=no-member\ndef get_node(service_name, host_name):\n \"\"\"Generates Node message from params and system information.\n\n Args:\n service_name: Name of Collector service.\n host_name: Host name.\n \"\"\"\n return common_pb2.Node(\n identifier=common_pb2.ProcessIdentifier(\n host_name=gethostname() if host_name is None else host_name,\n pid=getpid(),\n start_timestamp=proto_timestamp_from_time_ns(int(time() * 1e9)),\n ),\n library_info=common_pb2.LibraryInfo(\n language=common_pb2.LibraryInfo.Language.Value(\"PYTHON\"),\n exporter_version=opencensusexporter_exporter_version,\n core_library_version=OPENTELEMETRY_VERSION,\n ),\n service_info=common_pb2.ServiceInfo(name=service_name),\n )\n",
"path": "exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/util.py"
}
] | diff --git a/docs/examples/fork-process-model/flask-gunicorn/requirements.txt b/docs/examples/fork-process-model/flask-gunicorn/requirements.txt
index ad166e35901..e1e8b00aa21 100644
--- a/docs/examples/fork-process-model/flask-gunicorn/requirements.txt
+++ b/docs/examples/fork-process-model/flask-gunicorn/requirements.txt
@@ -12,7 +12,7 @@ opentelemetry-instrumentation==0.41b0
opentelemetry-instrumentation-flask==0.41b0
opentelemetry-instrumentation-wsgi==0.41b0
opentelemetry-sdk==1.20.0
-protobuf==3.19.5
+protobuf==3.20.3
six==1.15.0
thrift==0.13.0
uWSGI==2.0.22
diff --git a/docs/examples/fork-process-model/flask-uwsgi/requirements.txt b/docs/examples/fork-process-model/flask-uwsgi/requirements.txt
index ad166e35901..e1e8b00aa21 100644
--- a/docs/examples/fork-process-model/flask-uwsgi/requirements.txt
+++ b/docs/examples/fork-process-model/flask-uwsgi/requirements.txt
@@ -12,7 +12,7 @@ opentelemetry-instrumentation==0.41b0
opentelemetry-instrumentation-flask==0.41b0
opentelemetry-instrumentation-wsgi==0.41b0
opentelemetry-sdk==1.20.0
-protobuf==3.19.5
+protobuf==3.20.3
six==1.15.0
thrift==0.13.0
uWSGI==2.0.22
diff --git a/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/util.py b/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/util.py
index 694e8dc6a10..77eed6ffd17 100644
--- a/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/util.py
+++ b/exporter/opentelemetry-exporter-opencensus/src/opentelemetry/exporter/opencensus/util.py
@@ -17,7 +17,9 @@
from time import time
# pylint: disable=wrong-import-position
-from google.protobuf.timestamp_pb2 import Timestamp
+from google.protobuf.timestamp_pb2 import ( # pylint: disable=no-name-in-module
+ Timestamp,
+)
from opencensus.proto.agent.common.v1 import common_pb2
from opencensus.proto.trace.v1 import trace_pb2
diff --git a/exporter/opentelemetry-exporter-opencensus/tests/test_otcollector_trace_exporter.py b/exporter/opentelemetry-exporter-opencensus/tests/test_otcollector_trace_exporter.py
index fa546cde7a2..75340da192c 100644
--- a/exporter/opentelemetry-exporter-opencensus/tests/test_otcollector_trace_exporter.py
+++ b/exporter/opentelemetry-exporter-opencensus/tests/test_otcollector_trace_exporter.py
@@ -16,7 +16,9 @@
from unittest import mock
import grpc
-from google.protobuf.timestamp_pb2 import Timestamp
+from google.protobuf.timestamp_pb2 import ( # pylint: disable=no-name-in-module
+ Timestamp,
+)
from opencensus.proto.trace.v1 import trace_pb2
import opentelemetry.exporter.opencensus.util as utils
diff --git a/exporter/opentelemetry-exporter-otlp-proto-common/test-requirements-0.txt b/exporter/opentelemetry-exporter-otlp-proto-common/test-requirements-0.txt
new file mode 100644
index 00000000000..a692923fc42
--- /dev/null
+++ b/exporter/opentelemetry-exporter-otlp-proto-common/test-requirements-0.txt
@@ -0,0 +1,19 @@
+asgiref==3.7.2
+attrs==23.2.0
+Deprecated==1.2.14
+flaky==3.7.0
+importlib-metadata==6.11.0
+iniconfig==2.0.0
+packaging==23.2
+pluggy==1.4.0
+protobuf==3.20.3
+py==1.11.0
+py-cpuinfo==9.0.0
+pytest==7.1.3
+pytest-benchmark==4.0.0
+tomli==2.0.1
+typing_extensions==4.10.0
+wrapt==1.16.0
+zipp==3.17.0
+-e opentelemetry-proto
+-e exporter/opentelemetry-exporter-otlp-proto-common
diff --git a/exporter/opentelemetry-exporter-otlp-proto-common/test-requirements.txt b/exporter/opentelemetry-exporter-otlp-proto-common/test-requirements-1.txt
similarity index 100%
rename from exporter/opentelemetry-exporter-otlp-proto-common/test-requirements.txt
rename to exporter/opentelemetry-exporter-otlp-proto-common/test-requirements-1.txt
diff --git a/exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements-0.txt b/exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements-0.txt
index aa28c101cf4..c1ef1b74296 100644
--- a/exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements-0.txt
+++ b/exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements-0.txt
@@ -8,7 +8,7 @@ importlib-metadata==6.11.0
iniconfig==2.0.0
packaging==23.2
pluggy==1.4.0
-protobuf==3.19.6
+protobuf==3.20.3
py==1.11.0
py-cpuinfo==9.0.0
pytest==7.1.3
diff --git a/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/logs/test_otlp_logs_exporter.py b/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/logs/test_otlp_logs_exporter.py
index be0c7e1b0f3..da66f830c2f 100644
--- a/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/logs/test_otlp_logs_exporter.py
+++ b/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/logs/test_otlp_logs_exporter.py
@@ -18,7 +18,9 @@
from unittest import TestCase
from unittest.mock import patch
-from google.protobuf.duration_pb2 import Duration
+from google.protobuf.duration_pb2 import ( # pylint: disable=no-name-in-module
+ Duration,
+)
from google.rpc.error_details_pb2 import RetryInfo
from grpc import ChannelCredentials, Compression, StatusCode, server
diff --git a/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/test_otlp_exporter_mixin.py b/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/test_otlp_exporter_mixin.py
index 4dfed3e1541..c5e1ed76e70 100644
--- a/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/test_otlp_exporter_mixin.py
+++ b/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/test_otlp_exporter_mixin.py
@@ -20,7 +20,9 @@
from unittest import TestCase
from unittest.mock import Mock, patch
-from google.protobuf.duration_pb2 import Duration
+from google.protobuf.duration_pb2 import ( # pylint: disable=no-name-in-module
+ Duration,
+)
from google.rpc.error_details_pb2 import RetryInfo
from grpc import Compression
diff --git a/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/test_otlp_metrics_exporter.py b/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/test_otlp_metrics_exporter.py
index 95733b917bf..c52e94a00ea 100644
--- a/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/test_otlp_metrics_exporter.py
+++ b/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/test_otlp_metrics_exporter.py
@@ -24,7 +24,9 @@
from unittest import TestCase
from unittest.mock import patch
-from google.protobuf.duration_pb2 import Duration
+from google.protobuf.duration_pb2 import ( # pylint: disable=no-name-in-module
+ Duration,
+)
from google.rpc.error_details_pb2 import RetryInfo
from grpc import ChannelCredentials, Compression, StatusCode, server
diff --git a/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/test_otlp_trace_exporter.py b/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/test_otlp_trace_exporter.py
index bb17e35b7b7..3424d2c0898 100644
--- a/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/test_otlp_trace_exporter.py
+++ b/exporter/opentelemetry-exporter-otlp-proto-grpc/tests/test_otlp_trace_exporter.py
@@ -20,7 +20,9 @@
from unittest import TestCase
from unittest.mock import Mock, PropertyMock, patch
-from google.protobuf.duration_pb2 import Duration
+from google.protobuf.duration_pb2 import ( # pylint: disable=no-name-in-module
+ Duration,
+)
from google.rpc.error_details_pb2 import RetryInfo
from grpc import ChannelCredentials, Compression, StatusCode, server
diff --git a/exporter/opentelemetry-exporter-otlp-proto-http/test-requirements-0.txt b/exporter/opentelemetry-exporter-otlp-proto-http/test-requirements-0.txt
index 0faf2626687..cad177da7b5 100644
--- a/exporter/opentelemetry-exporter-otlp-proto-http/test-requirements-0.txt
+++ b/exporter/opentelemetry-exporter-otlp-proto-http/test-requirements-0.txt
@@ -10,7 +10,7 @@ importlib-metadata==6.11.0
iniconfig==2.0.0
packaging==23.2
pluggy==1.4.0
-protobuf==3.19.6
+protobuf==3.20.3
py==1.11.0
py-cpuinfo==9.0.0
pytest==7.1.3
diff --git a/opentelemetry-proto/test-requirements.txt b/opentelemetry-proto/test-requirements-0.txt
similarity index 94%
rename from opentelemetry-proto/test-requirements.txt
rename to opentelemetry-proto/test-requirements-0.txt
index 4908ed5baaf..811544a0826 100644
--- a/opentelemetry-proto/test-requirements.txt
+++ b/opentelemetry-proto/test-requirements-0.txt
@@ -6,6 +6,7 @@ importlib-metadata==6.11.0
iniconfig==2.0.0
packaging==23.2
pluggy==1.4.0
+protobuf==3.20.3
py==1.11.0
py-cpuinfo==9.0.0
pytest==7.1.3
diff --git a/opentelemetry-proto/test-requirements-1.txt b/opentelemetry-proto/test-requirements-1.txt
new file mode 100644
index 00000000000..5b10e6867a7
--- /dev/null
+++ b/opentelemetry-proto/test-requirements-1.txt
@@ -0,0 +1,18 @@
+asgiref==3.7.2
+attrs==23.2.0
+Deprecated==1.2.14
+flaky==3.7.0
+importlib-metadata==6.11.0
+iniconfig==2.0.0
+packaging==23.2
+pluggy==1.4.0
+protobuf==4.25.3
+py==1.11.0
+py-cpuinfo==9.0.0
+pytest==7.1.3
+pytest-benchmark==4.0.0
+tomli==2.0.1
+typing_extensions==4.10.0
+wrapt==1.16.0
+zipp==3.17.0
+-e opentelemetry-proto
diff --git a/tox.ini b/tox.ini
index f537bd6216b..2e12556b4d1 100644
--- a/tox.ini
+++ b/tox.ini
@@ -8,9 +8,12 @@ envlist =
py3{8,9,10,11}-opentelemetry-api
pypy3-opentelemetry-api
- ; Test against both protobuf 3.x and 4.x
- py3{8,9,10,11}-proto{3,4}-opentelemetry-protobuf
- pypy3-proto{3,4}-opentelemetry-protobuf
+ ; The numbers at the end of the environment names
+ ; below mean these dependencies are being used:
+ ; 0: protobuf==3.20.3
+ ; 1: protobuf==4.25.3
+ py3{8,9,10,11}-opentelemetry-protobuf-{0,1}
+ pypy3-opentelemetry-protobuf-{0,1}
py3{8,9,10,11}-opentelemetry-sdk
pypy3-opentelemetry-sdk
@@ -31,18 +34,30 @@ envlist =
py3{8,9,10,11}-opentelemetry-exporter-opencensus
; exporter-opencensus intentionally excluded from pypy3
- py3{8,9,10,11}-proto{3,4}-opentelemetry-exporter-otlp-proto-common
- pypy3-proto{3,4}-opentelemetry-exporter-otlp-proto-common
+ ; The numbers at the end of the environment names
+ ; below mean these dependencies are being used:
+ ; 0: protobuf==3.20.3
+ ; 1: protobuf==4.25.3
+ py3{8,9,10,11}-opentelemetry-exporter-otlp-proto-common-{0,1}
+ pypy3-opentelemetry-exporter-otlp-proto-common-{0,1}
; opentelemetry-exporter-otlp
py3{8,9,10,11}-opentelemetry-exporter-otlp-combined
; intentionally excluded from pypy3
- py3{8,9,10,11}-proto{3,4}-opentelemetry-exporter-otlp-proto-grpc
+ ; The numbers at the end of the environment names
+ ; below mean these dependencies are being used:
+ ; 0: protobuf==3.20.3
+ ; 1: protobuf==4.25.3
+ py3{8,9,10,11}-opentelemetry-exporter-otlp-proto-grpc-{0,1}
; intentionally excluded from pypy3
- py3{8,9,10,11}-proto{3,4}-opentelemetry-exporter-otlp-proto-http
- pypy3-opentelemetry-proto{3,4}-exporter-otlp-proto-http
+ ; The numbers at the end of the environment names
+ ; below mean these dependencies are being used:
+ ; 0: protobuf==3.20.3
+ ; 1: protobuf==4.25.3
+ py3{8,9,10,11}-opentelemetry-exporter-otlp-proto-http-{0,1}
+ pypy3-opentelemetry-exporter-otlp-proto-http-{0,1}
py3{8,9,10,11}-opentelemetry-exporter-prometheus
pypy3-opentelemetry-exporter-prometheus
@@ -109,7 +124,8 @@ commands_pre =
; cases but it saves a lot of boilerplate in this file.
opentelemetry: pip install {toxinidir}/opentelemetry-api {toxinidir}/opentelemetry-semantic-conventions {toxinidir}/opentelemetry-sdk {toxinidir}/tests/opentelemetry-test-utils
- protobuf: pip install -r {toxinidir}/opentelemetry-proto/test-requirements.txt
+ protobuf-0: pip install -r {toxinidir}/opentelemetry-proto/test-requirements-0.txt
+ protobuf-1: pip install -r {toxinidir}/opentelemetry-proto/test-requirements-1.txt
getting-started: pip install -r {toxinidir}/docs/getting_started/tests/requirements.txt
getting-started: pip install -e {env:CONTRIB_REPO}\#egg=opentelemetry-util-http&subdirectory=util/opentelemetry-util-http
@@ -120,15 +136,16 @@ commands_pre =
exporter-opencensus: pip install -r {toxinidir}/exporter/opentelemetry-exporter-opencensus/test-requirements.txt
- exporter-otlp-proto-common: pip install -r {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-common/test-requirements.txt
+ exporter-otlp-proto-common-0: pip install -r {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-common/test-requirements-0.txt
+ exporter-otlp-proto-common-1: pip install -r {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-common/test-requirements-1.txt
exporter-otlp-combined: pip install -r {toxinidir}/exporter/opentelemetry-exporter-otlp/test-requirements.txt
- proto3-opentelemetry-exporter-otlp-proto-grpc: pip install -r {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements-0.txt
- proto4-opentelemetry-exporter-otlp-proto-grpc: pip install -r {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements-1.txt
+ opentelemetry-exporter-otlp-proto-grpc-0: pip install -r {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements-0.txt
+ opentelemetry-exporter-otlp-proto-grpc-1: pip install -r {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements-1.txt
- proto3-opentelemetry-exporter-otlp-proto-http: pip install -r {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-http/test-requirements-0.txt
- proto4-opentelemetry-exporter-otlp-proto-http: pip install -r {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-http/test-requirements-1.txt
+ opentelemetry-exporter-otlp-proto-http-0: pip install -r {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-http/test-requirements-0.txt
+ opentelemetry-exporter-otlp-proto-http-1: pip install -r {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-http/test-requirements-1.txt
opentracing-shim: pip install -r {toxinidir}/shim/opentelemetry-opentracing-shim/test-requirements.txt
@@ -204,27 +221,24 @@ deps =
-r dev-requirements.txt
commands_pre =
- pip install -r {toxinidir}/opentelemetry-api/test-requirements.txt
- pip install -r {toxinidir}/opentelemetry-sdk/test-requirements.txt
- pip install -r {toxinidir}/opentelemetry-semantic-conventions/test-requirements.txt
- pip install -r {toxinidir}/opentelemetry-proto/test-requirements.txt
- pip install -r {toxinidir}/shim/opentelemetry-opentracing-shim/test-requirements.txt
- pip install -r {toxinidir}/shim/opentelemetry-opencensus-shim/test-requirements.txt
- pip install -r {toxinidir}/exporter/opentelemetry-exporter-opencensus/test-requirements.txt
- pip install -r {toxinidir}/tests/opentelemetry-test-utils/test-requirements.txt
- pip install -r {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-common/test-requirements.txt
- pip install -r {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-http/test-requirements-1.txt
- pip install -r {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements-1.txt
- pip install -r {toxinidir}/exporter/opentelemetry-exporter-prometheus/test-requirements.txt
- pip install -r {toxinidir}/exporter/opentelemetry-exporter-zipkin-proto-http/test-requirements.txt
- pip install -r {toxinidir}/exporter/opentelemetry-exporter-zipkin-json/test-requirements.txt
- pip install -r {toxinidir}/exporter/opentelemetry-exporter-otlp/test-requirements.txt
- pip install -r {toxinidir}/exporter/opentelemetry-exporter-zipkin/test-requirements.txt
- pip install -r {toxinidir}/propagator/opentelemetry-propagator-b3/test-requirements.txt
- pip install -r {toxinidir}/propagator/opentelemetry-propagator-jaeger/test-requirements.txt
- # Pin protobuf version due to lint failing on v3.20.0
- # https://github.com/protocolbuffers/protobuf/issues/9730
- python -m pip install protobuf==3.19.4
+ pip install -r {toxinidir}/opentelemetry-api/test-requirements.txt \
+ -r {toxinidir}/opentelemetry-sdk/test-requirements.txt \
+ -r {toxinidir}/opentelemetry-semantic-conventions/test-requirements.txt \
+ -r {toxinidir}/opentelemetry-proto/test-requirements-0.txt \
+ -r {toxinidir}/shim/opentelemetry-opentracing-shim/test-requirements.txt \
+ -r {toxinidir}/shim/opentelemetry-opencensus-shim/test-requirements.txt \
+ -r {toxinidir}/exporter/opentelemetry-exporter-opencensus/test-requirements.txt \
+ -r {toxinidir}/tests/opentelemetry-test-utils/test-requirements.txt \
+ -r {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-common/test-requirements-0.txt \
+ -r {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-http/test-requirements-0.txt \
+ -r {toxinidir}/exporter/opentelemetry-exporter-otlp-proto-grpc/test-requirements-0.txt \
+ -r {toxinidir}/exporter/opentelemetry-exporter-prometheus/test-requirements.txt \
+ -r {toxinidir}/exporter/opentelemetry-exporter-zipkin-proto-http/test-requirements.txt \
+ -r {toxinidir}/exporter/opentelemetry-exporter-zipkin-json/test-requirements.txt \
+ -r {toxinidir}/exporter/opentelemetry-exporter-otlp/test-requirements.txt \
+ -r {toxinidir}/exporter/opentelemetry-exporter-zipkin/test-requirements.txt \
+ -r {toxinidir}/propagator/opentelemetry-propagator-b3/test-requirements.txt \
+ -r {toxinidir}/propagator/opentelemetry-propagator-jaeger/test-requirements.txt
commands =
python scripts/eachdist.py lint --check-only
| lint takes a long time
Fix that.
|
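The source-level change in the OpenTelemetry record is small: the protobuf pins move to 3.20.3/4.25.3 and the imports of generated protobuf modules gain a pylint disable so the lint run stays clean. A minimal sketch of that import pattern is below, assuming the `protobuf` package is installed; the timestamp value and printed output are illustrative only.
```python
# Sketch of the import style adopted in the diff: timestamp_pb2 is generated
# code, so pylint cannot resolve Timestamp statically and needs an explicit
# disable on the import line; runtime behaviour is unchanged.
from google.protobuf.timestamp_pb2 import (  # pylint: disable=no-name-in-module
    Timestamp,
)

ts = Timestamp()
ts.FromNanoseconds(1_500_000_000)   # 1.5 seconds after the Unix epoch
print(ts.ToJsonString())            # e.g. "1970-01-01T00:00:01.500Z"
```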
kivy__kivy-4728 | [
{
"content": "'''\nPIL: PIL image loader\n'''\n\n__all__ = ('ImageLoaderPIL', )\n\ntry:\n from PIL import Image as PILImage\nexcept:\n import Image as PILImage\n\nfrom kivy.logger import Logger\nfrom kivy.core.image import ImageLoaderBase, ImageData, ImageLoader\n\n\nclass ImageLoaderPIL(ImageLoaderBase):\n '''Image loader based on the PIL library.\n\n .. versionadded:: 1.0.8\n\n Support for GIF animation added.\n\n Gif animation has a lot of issues(transparency/color depths... etc).\n In order to keep it simple, what is implemented here is what is\n natively supported by the PIL library.\n\n As a general rule, try to use gifs that have no transparency.\n Gif's with transparency will work but be prepared for some\n artifacts until transparency support is improved.\n\n '''\n\n @staticmethod\n def can_save():\n return True\n\n @staticmethod\n def can_load_memory():\n return True\n\n @staticmethod\n def extensions():\n '''Return accepted extensions for this loader'''\n # See http://www.pythonware.com/library/pil/handbook/index.htm\n return ('bmp', 'bufr', 'cur', 'dcx', 'fits', 'fl', 'fpx', 'gbr',\n 'gd', 'gif', 'grib', 'hdf5', 'ico', 'im', 'imt', 'iptc',\n 'jpeg', 'jpg', 'jpe', 'mcidas', 'mic', 'mpeg', 'msp',\n 'pcd', 'pcx', 'pixar', 'png', 'ppm', 'psd', 'sgi',\n 'spider', 'tga', 'tiff', 'wal', 'wmf', 'xbm', 'xpm',\n 'xv')\n\n def _img_correct(self, _img_tmp):\n '''Convert image to the correct format and orientation.\n '''\n # image loader work only with rgb/rgba image\n if _img_tmp.mode.lower() not in ('rgb', 'rgba'):\n try:\n imc = _img_tmp.convert('RGBA')\n except:\n Logger.warning(\n 'Image: Unable to convert image to rgba (was %s)' %\n (_img_tmp.mode.lower()))\n raise\n _img_tmp = imc\n\n return _img_tmp\n\n def _img_read(self, im):\n '''Read images from an animated file.\n '''\n im.seek(0)\n\n # Read all images inside\n try:\n img_ol = None\n while True:\n img_tmp = im\n img_tmp = self._img_correct(img_tmp)\n if img_ol and (hasattr(im, 'dispose') and not im.dispose):\n # paste new frame over old so as to handle\n # transparency properly\n img_ol.paste(img_tmp, (0, 0), img_tmp)\n img_tmp = img_ol\n img_ol = img_tmp\n yield ImageData(img_tmp.size[0], img_tmp.size[1],\n img_tmp.mode.lower(), img_tmp.tobytes())\n im.seek(im.tell() + 1)\n except EOFError:\n pass\n\n def load(self, filename):\n try:\n im = PILImage.open(filename)\n except:\n Logger.warning('Image: Unable to load image <%s>' % filename)\n raise\n # update internals\n if not self._inline:\n self.filename = filename\n # returns an array of type ImageData len 1 if not a sequence image\n return list(self._img_read(im))\n\n @staticmethod\n def save(filename, width, height, fmt, pixels, flipped=False):\n image = PILImage.fromstring(fmt.upper(), (width, height), pixels)\n if flipped:\n image = image.transpose(PILImage.FLIP_TOP_BOTTOM)\n image.save(filename)\n return True\n\n\n# register\nImageLoader.register(ImageLoaderPIL)\n",
"path": "kivy/core/image/img_pil.py"
}
] | [
{
"content": "'''\nPIL: PIL image loader\n'''\n\n__all__ = ('ImageLoaderPIL', )\n\ntry:\n from PIL import Image as PILImage\nexcept:\n import Image as PILImage\n\nfrom kivy.logger import Logger\nfrom kivy.core.image import ImageLoaderBase, ImageData, ImageLoader\n\n\nclass ImageLoaderPIL(ImageLoaderBase):\n '''Image loader based on the PIL library.\n\n .. versionadded:: 1.0.8\n\n Support for GIF animation added.\n\n Gif animation has a lot of issues(transparency/color depths... etc).\n In order to keep it simple, what is implemented here is what is\n natively supported by the PIL library.\n\n As a general rule, try to use gifs that have no transparency.\n Gif's with transparency will work but be prepared for some\n artifacts until transparency support is improved.\n\n '''\n\n @staticmethod\n def can_save():\n return True\n\n @staticmethod\n def can_load_memory():\n return True\n\n @staticmethod\n def extensions():\n '''Return accepted extensions for this loader'''\n # See http://www.pythonware.com/library/pil/handbook/index.htm\n return ('bmp', 'bufr', 'cur', 'dcx', 'fits', 'fl', 'fpx', 'gbr',\n 'gd', 'gif', 'grib', 'hdf5', 'ico', 'im', 'imt', 'iptc',\n 'jpeg', 'jpg', 'jpe', 'mcidas', 'mic', 'mpeg', 'msp',\n 'pcd', 'pcx', 'pixar', 'png', 'ppm', 'psd', 'sgi',\n 'spider', 'tga', 'tiff', 'wal', 'wmf', 'xbm', 'xpm',\n 'xv')\n\n def _img_correct(self, _img_tmp):\n '''Convert image to the correct format and orientation.\n '''\n # image loader work only with rgb/rgba image\n if _img_tmp.mode.lower() not in ('rgb', 'rgba'):\n try:\n imc = _img_tmp.convert('RGBA')\n except:\n Logger.warning(\n 'Image: Unable to convert image to rgba (was %s)' %\n (_img_tmp.mode.lower()))\n raise\n _img_tmp = imc\n\n return _img_tmp\n\n def _img_read(self, im):\n '''Read images from an animated file.\n '''\n im.seek(0)\n\n # Read all images inside\n try:\n img_ol = None\n while True:\n img_tmp = im\n img_tmp = self._img_correct(img_tmp)\n if img_ol and (hasattr(im, 'dispose') and not im.dispose):\n # paste new frame over old so as to handle\n # transparency properly\n img_ol.paste(img_tmp, (0, 0), img_tmp)\n img_tmp = img_ol\n img_ol = img_tmp\n yield ImageData(img_tmp.size[0], img_tmp.size[1],\n img_tmp.mode.lower(), img_tmp.tobytes())\n im.seek(im.tell() + 1)\n except EOFError:\n pass\n\n def load(self, filename):\n try:\n im = PILImage.open(filename)\n except:\n Logger.warning('Image: Unable to load image <%s>' % filename)\n raise\n # update internals\n if not self._inline:\n self.filename = filename\n # returns an array of type ImageData len 1 if not a sequence image\n return list(self._img_read(im))\n\n @staticmethod\n def save(filename, width, height, fmt, pixels, flipped=False):\n image = PILImage.frombytes(fmt.upper(), (width, height), pixels)\n\n if flipped:\n image = image.transpose(PILImage.FLIP_TOP_BOTTOM)\n image.save(filename)\n return True\n\n\n# register\nImageLoader.register(ImageLoaderPIL)\n",
"path": "kivy/core/image/img_pil.py"
}
] | diff --git a/kivy/core/image/img_pil.py b/kivy/core/image/img_pil.py
index 139ff81219..93067516a3 100644
--- a/kivy/core/image/img_pil.py
+++ b/kivy/core/image/img_pil.py
@@ -102,7 +102,8 @@ def load(self, filename):
@staticmethod
def save(filename, width, height, fmt, pixels, flipped=False):
- image = PILImage.fromstring(fmt.upper(), (width, height), pixels)
+ image = PILImage.frombytes(fmt.upper(), (width, height), pixels)
+
if flipped:
image = image.transpose(PILImage.FLIP_TOP_BOTTOM)
image.save(filename)
| Error: fromstring() in core/image/img_pil.py
Platform: Linux (OpenSuse, Ubuntu)
[INFO ] [Kivy ] v1.9.1
[INFO ] [Python ] v2.7.12 (default, Jul 01 2016, 15:36:53) [GCC]
Error:
File "/usr/lib64/python2.7/site-packages/kivy/core/image/img_pil.py", line 105, in save
image = PILImage.fromstring(fmt.upper(), (width, height), pixels)
File "/usr/lib64/python2.7/site-packages/PIL/Image.py", line 2063, in fromstring
"Please call frombytes() instead.")
Exception: fromstring() has been removed. Please call frombytes() instead.
In File "/usr/lib64/python2.7/site-packages/kivy/core/image/img_pil.py"
Line 105:
image = PILImage.fromstring(fmt.upper(), (width, height), pixels)
use...
image = PILImage.frombytes(fmt.upper(), (width, height), pixels)
|
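The kivy fix above is a one-call rename: Pillow removed `Image.fromstring()`, and `Image.frombytes()` accepts the same `(mode, size, data)` arguments. A minimal standalone sketch of the replacement call follows; the pixel buffer and the `example.png` output path are made up for illustration.
```python
# Minimal sketch of the frombytes() replacement: same (mode, size, data)
# signature as the removed fromstring(), reading raw pixel bytes.
from PIL import Image

width, height = 2, 2
pixels = b"\xff\x00\x00" * (width * height)        # four solid-red RGB pixels
image = Image.frombytes("RGB", (width, height), pixels)
image = image.transpose(Image.FLIP_TOP_BOTTOM)     # same flip the loader applies
image.save("example.png")                          # illustrative output path
```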
ethereum__consensus-specs-1131 | [
{
"content": "import sys\nimport function_puller\n\n\ndef build_phase0_spec(sourcefile, outfile):\n code_lines = []\n code_lines.append(\"\"\"\n\nfrom typing import (\n Any,\n Dict,\n List,\n NewType,\n Tuple,\n)\nfrom eth2spec.utils.minimal_ssz import (\n SSZType,\n hash_tree_root,\n signing_root,\n)\nfrom eth2spec.utils.bls_stub import (\n bls_aggregate_pubkeys,\n bls_verify,\n bls_verify_multiple,\n)\nfrom eth2spec.utils.hash_function import hash\n\n\n# stub, will get overwritten by real var\nSLOTS_PER_EPOCH = 64\n\nSlot = NewType('Slot', int) # uint64\nEpoch = NewType('Epoch', int) # uint64\nShard = NewType('Shard', int) # uint64\nValidatorIndex = NewType('ValidatorIndex', int) # uint64\nGwei = NewType('Gwei', int) # uint64\nBytes32 = NewType('Bytes32', bytes) # bytes32\nBLSPubkey = NewType('BLSPubkey', bytes) # bytes48\nBLSSignature = NewType('BLSSignature', bytes) # bytes96\nStore = None\n\"\"\")\n\n code_lines += function_puller.get_spec(sourcefile)\n\n code_lines.append(\"\"\"\n# Monkey patch validator compute committee code\n_compute_committee = compute_committee\ncommittee_cache = {}\n\n\ndef compute_committee(indices: List[ValidatorIndex], seed: Bytes32, index: int, count: int) -> List[ValidatorIndex]:\n param_hash = (hash_tree_root(indices), seed, index, count)\n\n if param_hash in committee_cache:\n return committee_cache[param_hash]\n else:\n ret = _compute_committee(indices, seed, index, count)\n committee_cache[param_hash] = ret\n return ret\n\n\n# Monkey patch hash cache\n_hash = hash\nhash_cache = {}\n\n\ndef hash(x):\n if x in hash_cache:\n return hash_cache[x]\n else:\n ret = _hash(x)\n hash_cache[x] = ret\n return ret\n\n\n# Access to overwrite spec constants based on configuration\ndef apply_constants_preset(preset: Dict[str, Any]):\n global_vars = globals()\n for k, v in preset.items():\n global_vars[k] = v\n\n # Deal with derived constants\n global_vars['GENESIS_EPOCH'] = slot_to_epoch(GENESIS_SLOT)\n\n # Initialize SSZ types again, to account for changed lengths\n init_SSZ_types()\n\"\"\")\n\n with open(outfile, 'w') as out:\n out.write(\"\\n\".join(code_lines))\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 3:\n print(\"Usage: <source phase0> <output phase0 pyspec>\")\n build_phase0_spec(sys.argv[1], sys.argv[2])\n\n",
"path": "scripts/phase0/build_spec.py"
}
] | [
{
"content": "import sys\nimport function_puller\n\n\ndef build_phase0_spec(sourcefile, outfile):\n code_lines = []\n code_lines.append(\"\"\"\n\nfrom typing import (\n Any,\n Dict,\n List,\n NewType,\n Tuple,\n)\nfrom eth2spec.utils.minimal_ssz import *\nfrom eth2spec.utils.hash_function import hash\nfrom eth2spec.utils.bls import *\n\n\n# stub, will get overwritten by real var\nSLOTS_PER_EPOCH = 64\n\nSlot = NewType('Slot', int) # uint64\nEpoch = NewType('Epoch', int) # uint64\nShard = NewType('Shard', int) # uint64\nValidatorIndex = NewType('ValidatorIndex', int) # uint64\nGwei = NewType('Gwei', int) # uint64\nBytes32 = NewType('Bytes32', bytes) # bytes32\nBLSPubkey = NewType('BLSPubkey', bytes) # bytes48\nBLSSignature = NewType('BLSSignature', bytes) # bytes96\nStore = None\n\"\"\")\n\n code_lines += function_puller.get_spec(sourcefile)\n\n code_lines.append(\"\"\"\n# Monkey patch validator compute committee code\n_compute_committee = compute_committee\ncommittee_cache = {}\n\n\ndef compute_committee(indices: List[ValidatorIndex], seed: Bytes32, index: int, count: int) -> List[ValidatorIndex]:\n param_hash = (hash_tree_root(indices), seed, index, count)\n\n if param_hash in committee_cache:\n return committee_cache[param_hash]\n else:\n ret = _compute_committee(indices, seed, index, count)\n committee_cache[param_hash] = ret\n return ret\n\n\n# Monkey patch hash cache\n_hash = hash\nhash_cache = {}\n\n\ndef hash(x):\n if x in hash_cache:\n return hash_cache[x]\n else:\n ret = _hash(x)\n hash_cache[x] = ret\n return ret\n\n\n# Access to overwrite spec constants based on configuration\ndef apply_constants_preset(preset: Dict[str, Any]):\n global_vars = globals()\n for k, v in preset.items():\n global_vars[k] = v\n\n # Deal with derived constants\n global_vars['GENESIS_EPOCH'] = slot_to_epoch(GENESIS_SLOT)\n\n # Initialize SSZ types again, to account for changed lengths\n init_SSZ_types()\n\"\"\")\n\n with open(outfile, 'w') as out:\n out.write(\"\\n\".join(code_lines))\n\n\nif __name__ == '__main__':\n if len(sys.argv) < 3:\n print(\"Usage: <source phase0> <output phase0 pyspec>\")\n build_phase0_spec(sys.argv[1], sys.argv[2])\n\n",
"path": "scripts/phase0/build_spec.py"
}
] | diff --git a/Makefile b/Makefile
index a6b379b719..401e4bdc9f 100644
--- a/Makefile
+++ b/Makefile
@@ -34,7 +34,7 @@ install_test:
cd $(PY_SPEC_DIR); python3 -m venv venv; . venv/bin/activate; pip3 install -r requirements-testing.txt;
test: $(PY_SPEC_ALL_TARGETS)
- cd $(PY_SPEC_DIR); . venv/bin/activate; python -m pytest .
+ cd $(PY_SPEC_DIR); . venv/bin/activate; python -m pytest eth2spec
citest: $(PY_SPEC_ALL_TARGETS)
cd $(PY_SPEC_DIR); mkdir -p test-reports/eth2spec; . venv/bin/activate; python -m pytest --junitxml=test-reports/eth2spec/test_results.xml .
diff --git a/scripts/phase0/build_spec.py b/scripts/phase0/build_spec.py
index 14226cbd40..36d039c2ca 100644
--- a/scripts/phase0/build_spec.py
+++ b/scripts/phase0/build_spec.py
@@ -13,17 +13,9 @@ def build_phase0_spec(sourcefile, outfile):
NewType,
Tuple,
)
-from eth2spec.utils.minimal_ssz import (
- SSZType,
- hash_tree_root,
- signing_root,
-)
-from eth2spec.utils.bls_stub import (
- bls_aggregate_pubkeys,
- bls_verify,
- bls_verify_multiple,
-)
+from eth2spec.utils.minimal_ssz import *
from eth2spec.utils.hash_function import hash
+from eth2spec.utils.bls import *
# stub, will get overwritten by real var
diff --git a/specs/core/0_beacon-chain.md b/specs/core/0_beacon-chain.md
index 0678de9d27..9dc052ec42 100644
--- a/specs/core/0_beacon-chain.md
+++ b/specs/core/0_beacon-chain.md
@@ -1248,7 +1248,7 @@ def state_transition(state: BeaconState, block: BeaconBlock, validate_state_root
```python
def process_slots(state: BeaconState, slot: Slot) -> None:
- assert state.slot < slot
+ assert state.slot <= slot
while state.slot < slot:
process_slot(state)
# Process epoch on the first slot of the next epoch
@@ -1775,7 +1775,8 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None:
amount = deposit.data.amount
validator_pubkeys = [v.pubkey for v in state.validator_registry]
if pubkey not in validator_pubkeys:
- # Verify the deposit signature (proof of possession)
+ # Verify the deposit signature (proof of possession).
+ # Invalid signatures are allowed by the deposit contract, and hence included on-chain, but must not be processed.
# Note: deposits are valid across forks, hence the deposit domain is retrieved directly from `bls_domain`
if not bls_verify(
pubkey, signing_root(deposit.data), deposit.data.signature, bls_domain(DOMAIN_DEPOSIT)
diff --git a/specs/test_formats/README.md b/specs/test_formats/README.md
index 4783dc475c..ac84d5bc08 100644
--- a/specs/test_formats/README.md
+++ b/specs/test_formats/README.md
@@ -186,6 +186,18 @@ To prevent parsing of hundreds of different YAML files to test a specific test t
... <--- more test types
```
+## Common test-case properties
+
+Some test-case formats share some common key-value pair patterns, and these are documented here:
+
+```
+bls_setting: int -- optional, can have 3 different values:
+ 0: (default, applies if key-value pair is absent). Free to choose either BLS ON or OFF.
+ Tests are generated with valid BLS data in this case,
+ but there is no change of outcome when running the test if BLS is ON or OFF.
+ 1: known as "BLS required" - if the test validity is strictly dependent on BLS being ON
+ 2: known as "BLS ignored" - if the test validity is strictly dependent on BLS being OFF
+```
## Note for implementers
diff --git a/specs/test_formats/epoch_processing/README.md b/specs/test_formats/epoch_processing/README.md
new file mode 100644
index 0000000000..6384a0eda9
--- /dev/null
+++ b/specs/test_formats/epoch_processing/README.md
@@ -0,0 +1,29 @@
+# Epoch processing tests
+
+The different epoch sub-transitions are tested individually with test handlers.
+The format is similar to block-processing state-transition tests.
+There is no "change" factor however, the transitions are pure functions with just the pre-state as input.
+Hence, the format is shared between each test-handler. (See test condition documentation on how to run the tests.)
+
+## Test case format
+
+```yaml
+description: string -- description of test case, purely for debugging purposes
+bls_setting: int -- see general test-format spec.
+pre: BeaconState -- state before running the sub-transition
+post: BeaconState -- state after applying the epoch sub-transition.
+```
+
+## Condition
+
+A handler of the `epoch_processing` test-runner should process these cases,
+ calling the corresponding processing implementation.
+
+Sub-transitions:
+
+| *`sub-transition-name`* | *`processing call`* |
+|-------------------------|-----------------------------------|
+| `crosslinks` | `process_crosslinks(state)` |
+| `registry_updates` | `process_registry_updates(state)` |
+
+The resulting state should match the expected `post` state.
diff --git a/specs/test_formats/operations/README.md b/specs/test_formats/operations/README.md
index 842dc3615f..32cf880b36 100644
--- a/specs/test_formats/operations/README.md
+++ b/specs/test_formats/operations/README.md
@@ -2,9 +2,34 @@
The different kinds of operations ("transactions") are tested individually with test handlers.
-The tested operation kinds are:
-- [`deposits`](./deposits.md)
-- More tests are work-in-progress.
+## Test case format
+```yaml
+description: string -- description of test case, purely for debugging purposes
+bls_setting: int -- see general test-format spec.
+pre: BeaconState -- state before applying the operation
+<operation-name>: <operation-object> -- the YAML encoded operation, e.g. a "ProposerSlashing", or "Deposit".
+post: BeaconState -- state after applying the operation. No value if operation processing is aborted.
+```
+## Condition
+A handler of the `operations` test-runner should process these cases,
+ calling the corresponding processing implementation.
+
+Operations:
+
+| *`operation-name`* | *`operation-object`* | *`input name`* | *`processing call`* |
+|-------------------------|----------------------|----------------------|--------------------------------------------------------|
+| `attestation` | `Attestation` | `attestation` | `process_attestation(state, attestation)` |
+| `attester_slashing` | `AttesterSlashing` | `attester_slashing` | `process_attester_slashing(state, attester_slashing)` |
+| `block_header` | `Block` | `block` | `process_block_header(state, block)` |
+| `deposit` | `Deposit` | `deposit` | `process_deposit(state, deposit)` |
+| `proposer_slashing` | `ProposerSlashing` | `proposer_slashing` | `process_proposer_slashing(state, proposer_slashing)` |
+| `transfer` | `Transfer` | `transfer` | `process_transfer(state, transfer)` |
+| `voluntary_exit` | `VoluntaryExit` | `voluntary_exit` | `process_voluntary_exit(state, voluntary_exit)` |
+
+Note that `block_header` is not strictly an operation (and is a full `Block`), but processed in the same manner, and hence included here.
+
+The resulting state should match the expected `post` state, or if the `post` state is left blank,
+ the handler should reject the input operation as invalid.
diff --git a/specs/test_formats/operations/deposits.md b/specs/test_formats/operations/deposits.md
deleted file mode 100644
index 8f44ebb228..0000000000
--- a/specs/test_formats/operations/deposits.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# Test format: Deposit operations
-
-A deposit is a form of an operation (or "transaction"), modifying the state.
-
-## Test case format
-
-```yaml
-description: string -- description of test case, purely for debugging purposes
-pre: BeaconState -- state before applying the deposit
-deposit: Deposit -- the deposit
-post: BeaconState -- state after applying the deposit. No value if deposit processing is aborted.
-```
-
-## Condition
-
-A `deposits` handler of the `operations` should process these cases,
- calling the implementation of the `process_deposit(state, deposit)` functionality described in the spec.
-The resulting state should match the expected `post` state, or if the `post` state is left blank, the handler should reject the inputs as invalid.
diff --git a/specs/test_formats/sanity/README.md b/specs/test_formats/sanity/README.md
new file mode 100644
index 0000000000..20b36208a4
--- /dev/null
+++ b/specs/test_formats/sanity/README.md
@@ -0,0 +1,7 @@
+# Sanity tests
+
+The aim of the sanity tests is to set a base-line on what really needs to pass, i.e. the essentials.
+
+There are two handlers, documented individually:
+- [`slots`](./slots.md): transitions of one or more slots (and epoch transitions within)
+- [`blocks`](./blocks.md): transitions triggered by one or more blocks
diff --git a/specs/test_formats/sanity/blocks.md b/specs/test_formats/sanity/blocks.md
new file mode 100644
index 0000000000..3004a6de70
--- /dev/null
+++ b/specs/test_formats/sanity/blocks.md
@@ -0,0 +1,18 @@
+# Sanity blocks testing
+
+Sanity tests to cover a series of one or more blocks being processed, aiming to cover common changes.
+
+## Test case format
+
+```yaml
+description: string -- description of test case, purely for debugging purposes
+bls_setting: int -- see general test-format spec.
+pre: BeaconState -- state before running through the transitions triggered by the blocks.
+blocks: [BeaconBlock] -- blocks to process, in given order, following the main transition function (i.e. process slot and epoch transitions in between blocks as normal)
+post: BeaconState -- state after applying all the transitions triggered by the blocks.
+```
+
+## Condition
+
+The resulting state should match the expected `post` state, or if the `post` state is left blank,
+ the handler should reject the series of blocks as invalid.
diff --git a/specs/test_formats/sanity/slots.md b/specs/test_formats/sanity/slots.md
new file mode 100644
index 0000000000..81866d47b9
--- /dev/null
+++ b/specs/test_formats/sanity/slots.md
@@ -0,0 +1,23 @@
+# Sanity slots testing
+
+Sanity tests to cover a series of one or more empty-slot transitions being processed, aiming to cover common changes.
+
+## Test case format
+
+```yaml
+description: string -- description of test case, purely for debugging purposes
+bls_setting: int -- see general test-format spec.
+pre: BeaconState -- state before running through the transitions.
+slots: N -- number of slots to process, N being a positive integer.
+post: BeaconState -- state after applying all the transitions.
+```
+
+The transition with pure time, no blocks, is known as `state_transition_to(state, slot)` in the spec.
+This runs state-caching (pure slot transition) and epoch processing (every E slots).
+
+To process the data, call `state_transition_to(pre, pre.slot + N)`. And see if `pre` mutated into the equivalent of `post`.
+
+
+## Condition
+
+The resulting state should match the expected `post` state.
diff --git a/specs/test_formats/ssz_static/core.md b/specs/test_formats/ssz_static/core.md
index 64b09a3296..f24a225b08 100644
--- a/specs/test_formats/ssz_static/core.md
+++ b/specs/test_formats/ssz_static/core.md
@@ -9,11 +9,11 @@ This test-format ensures these direct serializations are covered.
## Test case format
```yaml
-type_name: string -- string, object name, formatted as in spec. E.g. "BeaconBlock"
-value: dynamic -- the YAML-encoded value, of the type specified by type_name.
-serialized: bytes -- string, SSZ-serialized data, hex encoded, with prefix 0x
-root: bytes32 -- string, hash-tree-root of the value, hex encoded, with prefix 0x
-signing_root: bytes32 -- string, signing-root of the value, hex encoded, with prefix 0x. Optional, present if type contains ``signature`` field
+SomeObjectName: -- key, object name, formatted as in spec. E.g. "BeaconBlock".
+ value: dynamic -- the YAML-encoded value, of the type specified by type_name.
+ serialized: bytes -- string, SSZ-serialized data, hex encoded, with prefix 0x
+ root: bytes32 -- string, hash-tree-root of the value, hex encoded, with prefix 0x
+ signing_root: bytes32 -- string, signing-root of the value, hex encoded, with prefix 0x. Optional, present if type contains ``signature`` field
```
## Condition
diff --git a/test_generators/README.md b/test_generators/README.md
index f8124f9a78..95d7e70a8c 100644
--- a/test_generators/README.md
+++ b/test_generators/README.md
@@ -58,7 +58,7 @@ It's recommended to extend the base-generator.
Create a `requirements.txt` in the root of your generator directory:
```
-eth-utils==1.4.1
+eth-utils==1.6.0
../../test_libs/gen_helpers
../../test_libs/config_helpers
../../test_libs/pyspec
diff --git a/test_generators/bls/requirements.txt b/test_generators/bls/requirements.txt
index 5eebde29f9..6d83bdfb59 100644
--- a/test_generators/bls/requirements.txt
+++ b/test_generators/bls/requirements.txt
@@ -1,3 +1,3 @@
py-ecc==1.7.0
-eth-utils==1.4.1
+eth-utils==1.6.0
../../test_libs/gen_helpers
diff --git a/test_generators/epoch_processing/README.md b/test_generators/epoch_processing/README.md
new file mode 100644
index 0000000000..9b57875e2a
--- /dev/null
+++ b/test_generators/epoch_processing/README.md
@@ -0,0 +1,11 @@
+# Epoch processing
+
+Epoch processing covers the sub-transitions during an epoch change.
+
+An epoch-processing test-runner can consume these sub-transition test-suites,
+ and handle different kinds of epoch sub-transitions by processing the cases using the specified test handler.
+
+Information on the format of the tests can be found in the [epoch-processing test formats documentation](../../specs/test_formats/epoch_processing/README.md).
+
+
+
diff --git a/test_generators/epoch_processing/main.py b/test_generators/epoch_processing/main.py
new file mode 100644
index 0000000000..8f067e4a35
--- /dev/null
+++ b/test_generators/epoch_processing/main.py
@@ -0,0 +1,38 @@
+from typing import Callable, Iterable
+
+from eth2spec.phase0 import spec
+from eth2spec.test.epoch_processing import (
+ test_process_crosslinks,
+ test_process_registry_updates
+)
+from gen_base import gen_runner, gen_suite, gen_typing
+from gen_from_tests.gen import generate_from_tests
+from preset_loader import loader
+
+
+def create_suite(transition_name: str, config_name: str, get_cases: Callable[[], Iterable[gen_typing.TestCase]]) \
+ -> Callable[[str], gen_typing.TestSuiteOutput]:
+ def suite_definition(configs_path: str) -> gen_typing.TestSuiteOutput:
+ presets = loader.load_presets(configs_path, config_name)
+ spec.apply_constants_preset(presets)
+
+ return ("%s_%s" % (transition_name, config_name), transition_name, gen_suite.render_suite(
+ title="%s epoch processing" % transition_name,
+ summary="Test suite for %s type epoch processing" % transition_name,
+ forks_timeline="testing",
+ forks=["phase0"],
+ config=config_name,
+ runner="epoch_processing",
+ handler=transition_name,
+ test_cases=get_cases()))
+
+ return suite_definition
+
+
+if __name__ == "__main__":
+ gen_runner.run_generator("epoch_processing", [
+ create_suite('crosslinks', 'minimal', lambda: generate_from_tests(test_process_crosslinks)),
+ create_suite('crosslinks', 'mainnet', lambda: generate_from_tests(test_process_crosslinks)),
+ create_suite('registry_updates', 'minimal', lambda: generate_from_tests(test_process_registry_updates)),
+ create_suite('registry_updates', 'mainnet', lambda: generate_from_tests(test_process_registry_updates)),
+ ])
diff --git a/test_generators/epoch_processing/requirements.txt b/test_generators/epoch_processing/requirements.txt
new file mode 100644
index 0000000000..595cee69cd
--- /dev/null
+++ b/test_generators/epoch_processing/requirements.txt
@@ -0,0 +1,4 @@
+eth-utils==1.6.0
+../../test_libs/gen_helpers
+../../test_libs/config_helpers
+../../test_libs/pyspec
\ No newline at end of file
diff --git a/test_generators/operations/README.md b/test_generators/operations/README.md
index e0b9d0e187..5cb3afc989 100644
--- a/test_generators/operations/README.md
+++ b/test_generators/operations/README.md
@@ -3,7 +3,6 @@
Operations (or "transactions" in previous spec iterations),
are atomic changes to the state, introduced by embedding in blocks.
-This generator provides a series of test suites, divided into handler, for each operation type.
An operation test-runner can consume these operation test-suites,
and handle different kinds of operations by processing the cases using the specified test handler.
diff --git a/test_generators/operations/deposits.py b/test_generators/operations/deposits.py
deleted file mode 100644
index 075ccbd5ba..0000000000
--- a/test_generators/operations/deposits.py
+++ /dev/null
@@ -1,180 +0,0 @@
-from eth2spec.phase0 import spec
-from eth_utils import (
- to_dict, to_tuple
-)
-from gen_base import gen_suite, gen_typing
-from preset_loader import loader
-from eth2spec.debug.encode import encode
-from eth2spec.utils.minimal_ssz import signing_root
-from eth2spec.utils.merkle_minimal import get_merkle_root, calc_merkle_tree_from_leaves, get_merkle_proof
-
-from typing import List, Tuple
-
-import genesis
-import keys
-from py_ecc import bls
-
-
-def build_deposit_data(state,
- pubkey: spec.BLSPubkey,
- withdrawal_cred: spec.Bytes32,
- privkey: int,
- amount: int):
- deposit_data = spec.DepositData(
- pubkey=pubkey,
- withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + withdrawal_cred[1:],
- amount=amount,
- )
- deposit_data.proof_of_possession = bls.sign(
- message_hash=signing_root(deposit_data),
- privkey=privkey,
- domain=spec.get_domain(
- state,
- spec.get_current_epoch(state),
- spec.DOMAIN_DEPOSIT,
- )
- )
- return deposit_data
-
-
-def build_deposit(state,
- deposit_data_leaves: List[spec.Bytes32],
- pubkey: spec.BLSPubkey,
- withdrawal_cred: spec.Bytes32,
- privkey: int,
- amount: int) -> spec.Deposit:
-
- deposit_data = build_deposit_data(state, pubkey, withdrawal_cred, privkey, amount)
-
- item = deposit_data.hash_tree_root()
- index = len(deposit_data_leaves)
- deposit_data_leaves.append(item)
- tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves))
- proof = list(get_merkle_proof(tree, item_index=index))
-
- deposit = spec.Deposit(
- proof=list(proof),
- index=index,
- data=deposit_data,
- )
- assert spec.verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, index, get_merkle_root(tuple(deposit_data_leaves)))
-
- return deposit
-
-
-def build_deposit_for_index(initial_validator_count: int, index: int) -> Tuple[spec.Deposit, spec.BeaconState]:
- genesis_deposits = genesis.create_deposits(
- keys.pubkeys[:initial_validator_count],
- keys.withdrawal_creds[:initial_validator_count]
- )
- state = genesis.create_genesis_state(genesis_deposits)
-
- deposit_data_leaves = [dep.data.hash_tree_root() for dep in genesis_deposits]
-
- deposit = build_deposit(
- state,
- deposit_data_leaves,
- keys.pubkeys[index],
- keys.withdrawal_creds[index],
- keys.privkeys[index],
- spec.MAX_EFFECTIVE_BALANCE,
- )
-
- state.latest_eth1_data.deposit_root = get_merkle_root(tuple(deposit_data_leaves))
- state.latest_eth1_data.deposit_count = len(deposit_data_leaves)
-
- return deposit, state
-
-
-@to_dict
-def valid_deposit():
- new_dep, state = build_deposit_for_index(10, 10)
- yield 'description', 'valid deposit to add new validator'
- yield 'pre', encode(state, spec.BeaconState)
- yield 'deposit', encode(new_dep, spec.Deposit)
- spec.process_deposit(state, new_dep)
- yield 'post', encode(state, spec.BeaconState)
-
-
-@to_dict
-def valid_topup():
- new_dep, state = build_deposit_for_index(10, 3)
- yield 'description', 'valid deposit to top-up existing validator'
- yield 'pre', encode(state, spec.BeaconState)
- yield 'deposit', encode(new_dep, spec.Deposit)
- spec.process_deposit(state, new_dep)
- yield 'post', encode(state, spec.BeaconState)
-
-
-@to_dict
-def invalid_deposit_index():
- new_dep, state = build_deposit_for_index(10, 10)
- # Mess up deposit index, 1 too small
- state.deposit_index = 9
-
- yield 'description', 'invalid deposit index'
- yield 'pre', encode(state, spec.BeaconState)
- yield 'deposit', encode(new_dep, spec.Deposit)
- try:
- spec.process_deposit(state, new_dep)
- except AssertionError:
- # expected
- yield 'post', None
- return
- raise Exception('invalid_deposit_index has unexpectedly allowed deposit')
-
-
-@to_dict
-def invalid_deposit_proof():
- new_dep, state = build_deposit_for_index(10, 10)
- # Make deposit proof invalid (at bottom of proof)
- new_dep.proof[-1] = spec.ZERO_HASH
-
- yield 'description', 'invalid deposit proof'
- yield 'pre', encode(state, spec.BeaconState)
- yield 'deposit', encode(new_dep, spec.Deposit)
- try:
- spec.process_deposit(state, new_dep)
- except AssertionError:
- # expected
- yield 'post', None
- return
- raise Exception('invalid_deposit_index has unexpectedly allowed deposit')
-
-
-@to_tuple
-def deposit_cases():
- yield valid_deposit()
- yield valid_topup()
- yield invalid_deposit_index()
- yield invalid_deposit_proof()
-
-
-def mini_deposits_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
- presets = loader.load_presets(configs_path, 'minimal')
- spec.apply_constants_preset(presets)
-
- return ("deposit_minimal", "deposits", gen_suite.render_suite(
- title="deposit operation",
- summary="Test suite for deposit type operation processing",
- forks_timeline="testing",
- forks=["phase0"],
- config="minimal",
- runner="operations",
- handler="deposits",
- test_cases=deposit_cases()))
-
-
-def full_deposits_suite(configs_path: str) -> gen_typing.TestSuiteOutput:
- presets = loader.load_presets(configs_path, 'mainnet')
- spec.apply_constants_preset(presets)
-
- return ("deposit_full", "deposits", gen_suite.render_suite(
- title="deposit operation",
- summary="Test suite for deposit type operation processing",
- forks_timeline="mainnet",
- forks=["phase0"],
- config="mainnet",
- runner="operations",
- handler="deposits",
- test_cases=deposit_cases()))
diff --git a/test_generators/operations/genesis.py b/test_generators/operations/genesis.py
deleted file mode 100644
index f4d63c10ec..0000000000
--- a/test_generators/operations/genesis.py
+++ /dev/null
@@ -1,44 +0,0 @@
-from eth2spec.phase0 import spec
-from eth2spec.utils.merkle_minimal import get_merkle_root, calc_merkle_tree_from_leaves, get_merkle_proof
-from typing import List
-
-
-def create_genesis_state(deposits: List[spec.Deposit]) -> spec.BeaconState:
- deposit_root = get_merkle_root((tuple([(dep.data.hash_tree_root()) for dep in deposits])))
-
- return spec.get_genesis_beacon_state(
- deposits,
- genesis_time=0,
- genesis_eth1_data=spec.Eth1Data(
- deposit_root=deposit_root,
- deposit_count=len(deposits),
- block_hash=spec.ZERO_HASH,
- ),
- )
-
-
-def create_deposits(pubkeys: List[spec.BLSPubkey], withdrawal_cred: List[spec.Bytes32]) -> List[spec.Deposit]:
-
- # Mock proof of possession
- proof_of_possession = b'\x33' * 96
-
- deposit_data = [
- spec.DepositData(
- pubkey=pubkeys[i],
- withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + withdrawal_cred[i][1:],
- amount=spec.MAX_EFFECTIVE_BALANCE,
- proof_of_possession=proof_of_possession,
- ) for i in range(len(pubkeys))
- ]
-
- # Fill tree with existing deposits
- deposit_data_leaves = [data.hash_tree_root() for data in deposit_data]
- tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves))
-
- return [
- spec.Deposit(
- proof=list(get_merkle_proof(tree, item_index=i)),
- index=i,
- data=deposit_data[i]
- ) for i in range(len(deposit_data))
- ]
diff --git a/test_generators/operations/keys.py b/test_generators/operations/keys.py
deleted file mode 100644
index db4f59e0e6..0000000000
--- a/test_generators/operations/keys.py
+++ /dev/null
@@ -1,7 +0,0 @@
-from py_ecc import bls
-from eth2spec.phase0.spec import hash
-
-privkeys = list(range(1, 101))
-pubkeys = [bls.privtopub(k) for k in privkeys]
-# Insecure, but easier to follow
-withdrawal_creds = [hash(bls.privtopub(k)) for k in privkeys]
diff --git a/test_generators/operations/main.py b/test_generators/operations/main.py
index 8b0a2a6d83..96c639d12d 100644
--- a/test_generators/operations/main.py
+++ b/test_generators/operations/main.py
@@ -1,9 +1,53 @@
-from gen_base import gen_runner
+from typing import Callable, Iterable
+
+from eth2spec.test.block_processing import (
+ test_process_attestation,
+ test_process_attester_slashing,
+ test_process_block_header,
+ test_process_deposit,
+ test_process_proposer_slashing,
+ test_process_transfer,
+ test_process_voluntary_exit
+)
+
+from gen_base import gen_runner, gen_suite, gen_typing
+from gen_from_tests.gen import generate_from_tests
+from preset_loader import loader
+from eth2spec.phase0 import spec
+
+
+def create_suite(operation_name: str, config_name: str, get_cases: Callable[[], Iterable[gen_typing.TestCase]]) \
+ -> Callable[[str], gen_typing.TestSuiteOutput]:
+ def suite_definition(configs_path: str) -> gen_typing.TestSuiteOutput:
+ presets = loader.load_presets(configs_path, config_name)
+ spec.apply_constants_preset(presets)
+
+ return ("%s_%s" % (operation_name, config_name), operation_name, gen_suite.render_suite(
+ title="%s operation" % operation_name,
+ summary="Test suite for %s type operation processing" % operation_name,
+ forks_timeline="testing",
+ forks=["phase0"],
+ config=config_name,
+ runner="operations",
+ handler=operation_name,
+ test_cases=get_cases()))
+ return suite_definition
-from deposits import mini_deposits_suite, full_deposits_suite
if __name__ == "__main__":
gen_runner.run_generator("operations", [
- mini_deposits_suite,
- full_deposits_suite
+ create_suite('attestation', 'minimal', lambda: generate_from_tests(test_process_attestation)),
+ create_suite('attestation', 'mainnet', lambda: generate_from_tests(test_process_attestation)),
+ create_suite('attester_slashing', 'minimal', lambda: generate_from_tests(test_process_attester_slashing)),
+ create_suite('attester_slashing', 'mainnet', lambda: generate_from_tests(test_process_attester_slashing)),
+ create_suite('block_header', 'minimal', lambda: generate_from_tests(test_process_block_header)),
+ create_suite('block_header', 'mainnet', lambda: generate_from_tests(test_process_block_header)),
+ create_suite('deposit', 'minimal', lambda: generate_from_tests(test_process_deposit)),
+ create_suite('deposit', 'mainnet', lambda: generate_from_tests(test_process_deposit)),
+ create_suite('proposer_slashing', 'minimal', lambda: generate_from_tests(test_process_proposer_slashing)),
+ create_suite('proposer_slashing', 'mainnet', lambda: generate_from_tests(test_process_proposer_slashing)),
+ create_suite('transfer', 'minimal', lambda: generate_from_tests(test_process_transfer)),
+ create_suite('transfer', 'mainnet', lambda: generate_from_tests(test_process_transfer)),
+ create_suite('voluntary_exit', 'minimal', lambda: generate_from_tests(test_process_voluntary_exit)),
+ create_suite('voluntary_exit', 'mainnet', lambda: generate_from_tests(test_process_voluntary_exit)),
])
diff --git a/test_generators/operations/requirements.txt b/test_generators/operations/requirements.txt
index dfe8535365..595cee69cd 100644
--- a/test_generators/operations/requirements.txt
+++ b/test_generators/operations/requirements.txt
@@ -1,5 +1,4 @@
-eth-utils==1.4.1
+eth-utils==1.6.0
../../test_libs/gen_helpers
../../test_libs/config_helpers
-../../test_libs/pyspec
-py_ecc
\ No newline at end of file
+../../test_libs/pyspec
\ No newline at end of file
diff --git a/test_generators/sanity/README.md b/test_generators/sanity/README.md
new file mode 100644
index 0000000000..6d2e2f30dd
--- /dev/null
+++ b/test_generators/sanity/README.md
@@ -0,0 +1,8 @@
+# Sanity tests
+
+Sanity tests cover regular state-transitions in a common block-list format, to ensure the basics work.
+
+Information on the format of the tests can be found in the [sanity test formats documentation](../../specs/test_formats/sanity/README.md).
+
+
+
diff --git a/test_generators/sanity/main.py b/test_generators/sanity/main.py
new file mode 100644
index 0000000000..bba6ed03df
--- /dev/null
+++ b/test_generators/sanity/main.py
@@ -0,0 +1,35 @@
+from typing import Callable, Iterable
+
+from eth2spec.test.sanity import test_blocks, test_slots
+
+from gen_base import gen_runner, gen_suite, gen_typing
+from gen_from_tests.gen import generate_from_tests
+from preset_loader import loader
+from eth2spec.phase0 import spec
+
+
+def create_suite(handler_name: str, config_name: str, get_cases: Callable[[], Iterable[gen_typing.TestCase]]) \
+ -> Callable[[str], gen_typing.TestSuiteOutput]:
+ def suite_definition(configs_path: str) -> gen_typing.TestSuiteOutput:
+ presets = loader.load_presets(configs_path, config_name)
+ spec.apply_constants_preset(presets)
+
+        return ("sanity_%s_%s" % (handler_name, config_name), handler_name, gen_suite.render_suite(
+ title="sanity testing",
+ summary="Sanity test suite, %s type, generated from pytests" % handler_name,
+ forks_timeline="testing",
+ forks=["phase0"],
+ config=config_name,
+ runner="sanity",
+ handler=handler_name,
+ test_cases=get_cases()))
+ return suite_definition
+
+
+if __name__ == "__main__":
+ gen_runner.run_generator("sanity", [
+ create_suite('blocks', 'minimal', lambda: generate_from_tests(test_blocks)),
+ create_suite('blocks', 'mainnet', lambda: generate_from_tests(test_blocks)),
+ create_suite('slots', 'minimal', lambda: generate_from_tests(test_slots)),
+ create_suite('slots', 'mainnet', lambda: generate_from_tests(test_slots)),
+ ])
diff --git a/test_generators/sanity/requirements.txt b/test_generators/sanity/requirements.txt
new file mode 100644
index 0000000000..595cee69cd
--- /dev/null
+++ b/test_generators/sanity/requirements.txt
@@ -0,0 +1,4 @@
+eth-utils==1.6.0
+../../test_libs/gen_helpers
+../../test_libs/config_helpers
+../../test_libs/pyspec
\ No newline at end of file
diff --git a/test_generators/shuffling/main.py b/test_generators/shuffling/main.py
index 9ca3e2d36f..7115971896 100644
--- a/test_generators/shuffling/main.py
+++ b/test_generators/shuffling/main.py
@@ -10,7 +10,7 @@
def shuffling_case(seed: spec.Bytes32, count: int):
yield 'seed', '0x' + seed.hex()
yield 'count', count
- yield 'shuffled', [spec.get_permuted_index(i, count, seed) for i in range(count)]
+ yield 'shuffled', [spec.get_shuffled_index(i, count, seed) for i in range(count)]
@to_tuple
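
The renamed helper still maps every index in `range(count)` through the spec's shuffle, so the `shuffled` list in each case must be a permutation of `range(count)`. A small, purely illustrative sanity check one could run over a generated case; the example dict mirrors the key/value pairs yielded by `shuffling_case` above.

```
def check_shuffling_case(case: dict) -> None:
    # Each input index maps to some index in [0, count); taken together the
    # mapping is a bijection, so the values form a permutation of range(count).
    assert len(case['shuffled']) == case['count']
    assert sorted(case['shuffled']) == list(range(case['count']))

check_shuffling_case({'seed': '0x' + '00' * 32, 'count': 3, 'shuffled': [2, 0, 1]})
```
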
diff --git a/test_generators/shuffling/requirements.txt b/test_generators/shuffling/requirements.txt
index 8f9bede8f3..595cee69cd 100644
--- a/test_generators/shuffling/requirements.txt
+++ b/test_generators/shuffling/requirements.txt
@@ -1,4 +1,4 @@
-eth-utils==1.4.1
+eth-utils==1.6.0
../../test_libs/gen_helpers
../../test_libs/config_helpers
../../test_libs/pyspec
\ No newline at end of file
diff --git a/test_generators/ssz_generic/requirements.txt b/test_generators/ssz_generic/requirements.txt
index 94afc9d91b..dcdb0824ff 100644
--- a/test_generators/ssz_generic/requirements.txt
+++ b/test_generators/ssz_generic/requirements.txt
@@ -1,4 +1,4 @@
-eth-utils==1.4.1
+eth-utils==1.6.0
../../test_libs/gen_helpers
../../test_libs/config_helpers
ssz==0.1.0a2
diff --git a/test_generators/ssz_static/main.py b/test_generators/ssz_static/main.py
index 1234294db9..e8995b9185 100644
--- a/test_generators/ssz_static/main.py
+++ b/test_generators/ssz_static/main.py
@@ -18,10 +18,7 @@
@to_dict
-def create_test_case(rng: Random, name: str, mode: random_value.RandomizationMode, chaos: bool):
- typ = spec.get_ssz_type_by_name(name)
- value = random_value.get_random_ssz_object(rng, typ, MAX_BYTES_LENGTH, MAX_LIST_LENGTH, mode, chaos)
- yield "type_name", name
+def create_test_case_contents(value, typ):
yield "value", encode.encode(value, typ)
yield "serialized", '0x' + serialize(value).hex()
yield "root", '0x' + hash_tree_root(value).hex()
@@ -29,6 +26,13 @@ def create_test_case(rng: Random, name: str, mode: random_value.RandomizationMod
yield "signing_root", '0x' + signing_root(value).hex()
+@to_dict
+def create_test_case(rng: Random, name: str, mode: random_value.RandomizationMode, chaos: bool):
+ typ = spec.get_ssz_type_by_name(name)
+ value = random_value.get_random_ssz_object(rng, typ, MAX_BYTES_LENGTH, MAX_LIST_LENGTH, mode, chaos)
+ yield name, create_test_case_contents(value, typ)
+
+
@to_tuple
def ssz_static_cases(rng: Random, mode: random_value.RandomizationMode, chaos: bool, count: int):
for type_name in spec.ssz_types:
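
After this refactor each generated case nests the per-value fields under the SSZ type name via `create_test_case_contents`. An illustrative sketch of the resulting shape follows; the type name and all values below are placeholders, not real generated data.

```
# Illustrative shape of one case produced by create_test_case after the refactor.
example_case = {
    'BeaconBlockHeader': {          # the SSZ type name is now the top-level key
        'value': {'slot': 0},       # output of encode.encode(value, typ)
        'serialized': '0x00',       # '0x' + serialize(value).hex()
        'root': '0x00',             # '0x' + hash_tree_root(value).hex()
        'signing_root': '0x00',     # '0x' + signing_root(value).hex(), where applicable
    },
}
assert list(example_case) == ['BeaconBlockHeader']
```
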
diff --git a/test_generators/ssz_static/requirements.txt b/test_generators/ssz_static/requirements.txt
index 8f9bede8f3..595cee69cd 100644
--- a/test_generators/ssz_static/requirements.txt
+++ b/test_generators/ssz_static/requirements.txt
@@ -1,4 +1,4 @@
-eth-utils==1.4.1
+eth-utils==1.6.0
../../test_libs/gen_helpers
../../test_libs/config_helpers
../../test_libs/pyspec
\ No newline at end of file
diff --git a/test_libs/config_helpers/requirements.txt b/test_libs/config_helpers/requirements.txt
index e441a474b8..f2f208c3fb 100644
--- a/test_libs/config_helpers/requirements.txt
+++ b/test_libs/config_helpers/requirements.txt
@@ -1 +1 @@
-ruamel.yaml==0.15.87
+ruamel.yaml==0.15.96
diff --git a/test_libs/config_helpers/setup.py b/test_libs/config_helpers/setup.py
index 90ad94ee44..9f0ea06419 100644
--- a/test_libs/config_helpers/setup.py
+++ b/test_libs/config_helpers/setup.py
@@ -4,6 +4,6 @@
name='config_helpers',
packages=['preset_loader'],
install_requires=[
- "ruamel.yaml==0.15.87"
+ "ruamel.yaml==0.15.96"
]
)
diff --git a/test_libs/pyspec/tests/__init__.py b/test_libs/gen_helpers/gen_from_tests/__init__.py
similarity index 100%
rename from test_libs/pyspec/tests/__init__.py
rename to test_libs/gen_helpers/gen_from_tests/__init__.py
diff --git a/test_libs/gen_helpers/gen_from_tests/gen.py b/test_libs/gen_helpers/gen_from_tests/gen.py
new file mode 100644
index 0000000000..e7d8011310
--- /dev/null
+++ b/test_libs/gen_helpers/gen_from_tests/gen.py
@@ -0,0 +1,25 @@
+from inspect import getmembers, isfunction
+
+def generate_from_tests(src, bls_active=True):
+ """
+ Generate a list of test cases by running tests from the given src in generator-mode.
+ :param src: to retrieve tests from (discovered using inspect.getmembers)
+ :param bls_active: optional, to override BLS switch preference. Defaults to True.
+ :return: the list of test cases.
+ """
+ fn_names = [
+ name for (name, _) in getmembers(src, isfunction)
+ if name.startswith('test_')
+ ]
+ out = []
+ print("generating test vectors from tests source: %s" % src.__name__)
+ for name in fn_names:
+ tfn = getattr(src, name)
+ try:
+ test_case = tfn(generator_mode=True, bls_active=bls_active)
+ # If no test case data is returned, the test is ignored.
+ if test_case is not None:
+ out.append(test_case)
+ except AssertionError:
+ print("ERROR: failed to generate vector from test: %s (src: %s)" % (name, src.__name__))
+ return out
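
`generate_from_tests` relies on a simple contract from its source module: functions named `test_*` that accept `generator_mode` and `bls_active` keyword arguments, return a test case (or `None` to be skipped), or raise `AssertionError`, which is caught and reported. A self-contained sketch of a module satisfying that contract is shown below; the module name and case keys are invented for illustration.

```
import types

# A stand-in for a pytest-style test module as generate_from_tests expects it.
fake_module = types.ModuleType('fake_tests')

def test_example(generator_mode=False, bls_active=True):
    if not generator_mode:
        return None                 # plain pytest run: nothing to emit
    return {'description': 'example case'}

def test_skipped(generator_mode=False, bls_active=True):
    return None                     # returning None makes generate_from_tests ignore the test

fake_module.test_example = test_example
fake_module.test_skipped = test_skipped

# generate_from_tests(fake_module) would then return a one-element list
# containing the case produced by test_example.
```
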
diff --git a/test_libs/gen_helpers/requirements.txt b/test_libs/gen_helpers/requirements.txt
index 3d6a39458e..557cae6317 100644
--- a/test_libs/gen_helpers/requirements.txt
+++ b/test_libs/gen_helpers/requirements.txt
@@ -1,2 +1,2 @@
-ruamel.yaml==0.15.87
-eth-utils==1.4.1
+ruamel.yaml==0.15.96
+eth-utils==1.6.0
diff --git a/test_libs/gen_helpers/setup.py b/test_libs/gen_helpers/setup.py
index 5de27a6dbe..ee2c815c76 100644
--- a/test_libs/gen_helpers/setup.py
+++ b/test_libs/gen_helpers/setup.py
@@ -2,9 +2,9 @@
setup(
name='gen_helpers',
- packages=['gen_base'],
+ packages=['gen_base', 'gen_from_tests'],
install_requires=[
- "ruamel.yaml==0.15.87",
- "eth-utils==1.4.1"
+ "ruamel.yaml==0.15.96",
+ "eth-utils==1.6.0"
]
)
diff --git a/test_libs/pyspec/README.md b/test_libs/pyspec/README.md
index bb6991a930..2c2226ee70 100644
--- a/test_libs/pyspec/README.md
+++ b/test_libs/pyspec/README.md
@@ -46,8 +46,9 @@ The `-B` flag may be helpful to force-overwrite the `pyspec` output after you ma
Run the tests:
```
-pytest --config=minimal
+pytest --config=minimal eth2spec
```
+Note the package name: it is needed so pytest can locate the tests.
## Contributing
diff --git a/test_libs/pyspec/eth2spec/test/__init__.py b/test_libs/pyspec/eth2spec/test/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test_libs/pyspec/eth2spec/test/block_processing/__init__.py b/test_libs/pyspec/eth2spec/test/block_processing/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test_libs/pyspec/eth2spec/test/block_processing/test_process_attestation.py b/test_libs/pyspec/eth2spec/test/block_processing/test_process_attestation.py
new file mode 100644
index 0000000000..700d68b535
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/block_processing/test_process_attestation.py
@@ -0,0 +1,301 @@
+from copy import deepcopy
+
+import eth2spec.phase0.spec as spec
+from eth2spec.phase0.spec import (
+ get_current_epoch,
+ process_attestation,
+ process_slots,
+)
+from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls
+from eth2spec.test.helpers.attestations import (
+ get_valid_attestation,
+ sign_attestation,
+)
+from eth2spec.test.helpers.state import (
+ next_epoch,
+ next_slot,
+)
+from eth2spec.test.helpers.block import apply_empty_block
+
+
+def run_attestation_processing(state, attestation, valid=True):
+ """
+ Run ``process_attestation``, yielding:
+ - pre-state ('pre')
+ - attestation ('attestation')
+ - post-state ('post').
+ If ``valid == False``, run expecting ``AssertionError``
+ """
+ # yield pre-state
+ yield 'pre', state
+
+ yield 'attestation', attestation
+
+ # If the attestation is invalid, processing is aborted, and there is no post-state.
+ if not valid:
+ expect_assertion_error(lambda: process_attestation(state, attestation))
+ yield 'post', None
+ return
+
+ current_epoch_count = len(state.current_epoch_attestations)
+ previous_epoch_count = len(state.previous_epoch_attestations)
+
+ # process attestation
+ process_attestation(state, attestation)
+
+ # Make sure the attestation has been processed
+ if attestation.data.target_epoch == get_current_epoch(state):
+ assert len(state.current_epoch_attestations) == current_epoch_count + 1
+ else:
+ assert len(state.previous_epoch_attestations) == previous_epoch_count + 1
+
+ # yield post-state
+ yield 'post', state
+
+
+@spec_state_test
+def test_success(state):
+ attestation = get_valid_attestation(state, signed=True)
+ state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
+
+ yield from run_attestation_processing(state, attestation)
+
+
+@spec_state_test
+def test_success_previous_epoch(state):
+ attestation = get_valid_attestation(state, signed=True)
+ next_epoch(state)
+ apply_empty_block(state)
+
+ yield from run_attestation_processing(state, attestation)
+
+
+@spec_state_test
+def test_success_since_max_epochs_per_crosslink(state):
+ for _ in range(spec.MAX_EPOCHS_PER_CROSSLINK + 2):
+ next_epoch(state)
+ apply_empty_block(state)
+
+ attestation = get_valid_attestation(state, signed=True)
+ data = attestation.data
+ # test logic sanity check: make sure the attestation only includes MAX_EPOCHS_PER_CROSSLINK epochs
+ assert data.crosslink.end_epoch - data.crosslink.start_epoch == spec.MAX_EPOCHS_PER_CROSSLINK
+
+ for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY):
+ next_slot(state)
+ apply_empty_block(state)
+
+ yield from run_attestation_processing(state, attestation)
+
+
+@always_bls
+@spec_state_test
+def test_invalid_attestation_signature(state):
+ attestation = get_valid_attestation(state)
+ state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
+
+ yield from run_attestation_processing(state, attestation, False)
+
+
+@spec_state_test
+def test_before_inclusion_delay(state):
+ attestation = get_valid_attestation(state, signed=True)
+ # do not increment slot to allow for inclusion delay
+
+ yield from run_attestation_processing(state, attestation, False)
+
+
+@spec_state_test
+def test_after_epoch_slots(state):
+ attestation = get_valid_attestation(state, signed=True)
+ # increment past latest inclusion slot
+ process_slots(state, state.slot + spec.SLOTS_PER_EPOCH + 1)
+ apply_empty_block(state)
+
+ yield from run_attestation_processing(state, attestation, False)
+
+
+@spec_state_test
+def test_old_source_epoch(state):
+ state.slot = spec.SLOTS_PER_EPOCH * 5
+ state.finalized_epoch = 2
+ state.previous_justified_epoch = 3
+ state.current_justified_epoch = 4
+ attestation = get_valid_attestation(state, slot=(spec.SLOTS_PER_EPOCH * 3) + 1)
+
+    # test logic sanity check: make sure the attestation points to the oldest known source epoch
+ assert attestation.data.source_epoch == state.previous_justified_epoch
+
+    # Now go beyond that; it will be invalid
+ attestation.data.source_epoch -= 1
+
+ sign_attestation(state, attestation)
+
+ yield from run_attestation_processing(state, attestation, False)
+
+
+@spec_state_test
+def test_wrong_shard(state):
+ attestation = get_valid_attestation(state)
+ state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
+
+ attestation.data.crosslink.shard += 1
+
+ sign_attestation(state, attestation)
+
+ yield from run_attestation_processing(state, attestation, False)
+
+
+@spec_state_test
+def test_new_source_epoch(state):
+ attestation = get_valid_attestation(state)
+ state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
+
+ attestation.data.source_epoch += 1
+
+ sign_attestation(state, attestation)
+
+ yield from run_attestation_processing(state, attestation, False)
+
+
+@spec_state_test
+def test_source_root_is_target_root(state):
+ attestation = get_valid_attestation(state)
+ state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
+
+ attestation.data.source_root = attestation.data.target_root
+
+ sign_attestation(state, attestation)
+
+ yield from run_attestation_processing(state, attestation, False)
+
+
+@spec_state_test
+def test_invalid_current_source_root(state):
+ state.slot = spec.SLOTS_PER_EPOCH * 5
+ state.finalized_epoch = 2
+
+ state.previous_justified_epoch = 3
+ state.previous_justified_root = b'\x01' * 32
+
+ state.current_justified_epoch = 4
+ state.current_justified_root = b'\xff' * 32
+
+ attestation = get_valid_attestation(state, slot=(spec.SLOTS_PER_EPOCH * 3) + 1)
+ state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
+
+ # Test logic sanity checks:
+ assert state.current_justified_root != state.previous_justified_root
+ assert attestation.data.source_root == state.previous_justified_root
+
+ # Make attestation source root invalid: should be previous justified, not current one
+ attestation.data.source_root = state.current_justified_root
+
+ sign_attestation(state, attestation)
+
+ yield from run_attestation_processing(state, attestation, False)
+
+
+@spec_state_test
+def test_bad_source_root(state):
+ attestation = get_valid_attestation(state)
+ state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
+
+ attestation.data.source_root = b'\x42' * 32
+
+ sign_attestation(state, attestation)
+
+ yield from run_attestation_processing(state, attestation, False)
+
+
+@spec_state_test
+def test_non_zero_crosslink_data_root(state):
+ attestation = get_valid_attestation(state)
+ state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
+
+ attestation.data.crosslink.data_root = b'\x42' * 32
+
+ sign_attestation(state, attestation)
+
+ yield from run_attestation_processing(state, attestation, False)
+
+
+@spec_state_test
+def test_bad_parent_crosslink(state):
+ next_epoch(state)
+ apply_empty_block(state)
+
+ attestation = get_valid_attestation(state, signed=True)
+ for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY):
+ next_slot(state)
+ apply_empty_block(state)
+
+ attestation.data.crosslink.parent_root = b'\x27' * 32
+
+ yield from run_attestation_processing(state, attestation, False)
+
+
+@spec_state_test
+def test_bad_crosslink_start_epoch(state):
+ next_epoch(state)
+ apply_empty_block(state)
+
+ attestation = get_valid_attestation(state, signed=True)
+ for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY):
+ next_slot(state)
+ apply_empty_block(state)
+
+ attestation.data.crosslink.start_epoch += 1
+
+ yield from run_attestation_processing(state, attestation, False)
+
+
+@spec_state_test
+def test_bad_crosslink_end_epoch(state):
+ next_epoch(state)
+ apply_empty_block(state)
+
+ attestation = get_valid_attestation(state, signed=True)
+ for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY):
+ next_slot(state)
+ apply_empty_block(state)
+
+ attestation.data.crosslink.end_epoch += 1
+
+ yield from run_attestation_processing(state, attestation, False)
+
+
+@spec_state_test
+def test_inconsistent_bitfields(state):
+ attestation = get_valid_attestation(state)
+ state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
+
+ attestation.custody_bitfield = deepcopy(attestation.aggregation_bitfield) + b'\x00'
+
+ sign_attestation(state, attestation)
+
+ yield from run_attestation_processing(state, attestation, False)
+
+
+@spec_state_test
+def test_non_empty_custody_bitfield(state):
+ attestation = get_valid_attestation(state)
+ state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
+
+ attestation.custody_bitfield = deepcopy(attestation.aggregation_bitfield)
+
+ sign_attestation(state, attestation)
+
+ yield from run_attestation_processing(state, attestation, False)
+
+
+@spec_state_test
+def test_empty_aggregation_bitfield(state):
+ attestation = get_valid_attestation(state)
+ state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
+
+ attestation.aggregation_bitfield = b'\x00' * len(attestation.aggregation_bitfield)
+
+ sign_attestation(state, attestation)
+
+ yield from run_attestation_processing(state, attestation)
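
`expect_assertion_error`, `spec_state_test`, and `always_bls` are imported from `eth2spec.test.context`, which is not part of this diff. Judging only from how the helper is used in these tests, its contract is: run the callable and require that it fails with an `AssertionError`. A minimal sketch under that assumption (not the actual implementation):

```
def expect_assertion_error(fn):
    # Run the callable and require that it raises AssertionError,
    # i.e. the spec function rejected the invalid input.
    try:
        fn()
    except AssertionError:
        return
    # Raise something other than AssertionError so this failure is
    # distinguishable from the assert we were waiting for.
    raise RuntimeError('expected the call to fail with an AssertionError, but it succeeded')


def _always_fails():
    assert False, 'invalid input rejected by the spec'

expect_assertion_error(_always_fails)  # returns normally: the failure was expected
```
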
diff --git a/test_libs/pyspec/eth2spec/test/block_processing/test_process_attester_slashing.py b/test_libs/pyspec/eth2spec/test/block_processing/test_process_attester_slashing.py
new file mode 100644
index 0000000000..28e2322772
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/block_processing/test_process_attester_slashing.py
@@ -0,0 +1,149 @@
+import eth2spec.phase0.spec as spec
+from eth2spec.phase0.spec import (
+ get_beacon_proposer_index,
+ process_attester_slashing,
+)
+from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls
+from eth2spec.test.helpers.attestations import sign_indexed_attestation
+from eth2spec.test.helpers.attester_slashings import get_valid_attester_slashing
+from eth2spec.test.helpers.block import apply_empty_block
+from eth2spec.test.helpers.state import (
+ get_balance,
+ next_epoch,
+)
+
+
+def run_attester_slashing_processing(state, attester_slashing, valid=True):
+ """
+ Run ``process_attester_slashing``, yielding:
+ - pre-state ('pre')
+ - attester_slashing ('attester_slashing')
+ - post-state ('post').
+ If ``valid == False``, run expecting ``AssertionError``
+ """
+
+ yield 'pre', state
+ yield 'attester_slashing', attester_slashing
+
+ if not valid:
+ expect_assertion_error(lambda: process_attester_slashing(state, attester_slashing))
+ yield 'post', None
+ return
+
+ slashed_index = attester_slashing.attestation_1.custody_bit_0_indices[0]
+ pre_slashed_balance = get_balance(state, slashed_index)
+
+ proposer_index = get_beacon_proposer_index(state)
+ pre_proposer_balance = get_balance(state, proposer_index)
+
+ # Process slashing
+ process_attester_slashing(state, attester_slashing)
+
+ slashed_validator = state.validator_registry[slashed_index]
+
+ # Check slashing
+ assert slashed_validator.slashed
+ assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
+ assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
+
+ if slashed_index != proposer_index:
+ # lost whistleblower reward
+ assert get_balance(state, slashed_index) < pre_slashed_balance
+ # gained whistleblower reward
+ assert get_balance(state, proposer_index) > pre_proposer_balance
+ else:
+        # gained rewards for all slashings (which may include others), and only lost their own slashed balance.
+        # Net result is at least 0; if more validators were slashed, a balance increase.
+ assert get_balance(state, slashed_index) >= pre_slashed_balance
+
+ yield 'post', state
+
+
+@spec_state_test
+def test_success_double(state):
+ attester_slashing = get_valid_attester_slashing(state, signed_1=True, signed_2=True)
+
+ yield from run_attester_slashing_processing(state, attester_slashing)
+
+
+@spec_state_test
+def test_success_surround(state):
+ next_epoch(state)
+ apply_empty_block(state)
+
+ state.current_justified_epoch += 1
+ attester_slashing = get_valid_attester_slashing(state, signed_1=False, signed_2=True)
+
+    # set attestation 1 to surround attestation 2
+ attester_slashing.attestation_1.data.source_epoch = attester_slashing.attestation_2.data.source_epoch - 1
+ attester_slashing.attestation_1.data.target_epoch = attester_slashing.attestation_2.data.target_epoch + 1
+
+ sign_indexed_attestation(state, attester_slashing.attestation_1)
+
+ yield from run_attester_slashing_processing(state, attester_slashing)
+
+
+@always_bls
+@spec_state_test
+def test_invalid_sig_1(state):
+ attester_slashing = get_valid_attester_slashing(state, signed_1=False, signed_2=True)
+ yield from run_attester_slashing_processing(state, attester_slashing, False)
+
+
+@always_bls
+@spec_state_test
+def test_invalid_sig_2(state):
+ attester_slashing = get_valid_attester_slashing(state, signed_1=True, signed_2=False)
+ yield from run_attester_slashing_processing(state, attester_slashing, False)
+
+
+@always_bls
+@spec_state_test
+def test_invalid_sig_1_and_2(state):
+ attester_slashing = get_valid_attester_slashing(state, signed_1=False, signed_2=False)
+ yield from run_attester_slashing_processing(state, attester_slashing, False)
+
+
+@spec_state_test
+def test_same_data(state):
+ attester_slashing = get_valid_attester_slashing(state, signed_1=False, signed_2=True)
+
+ attester_slashing.attestation_1.data = attester_slashing.attestation_2.data
+ sign_indexed_attestation(state, attester_slashing.attestation_1)
+
+ yield from run_attester_slashing_processing(state, attester_slashing, False)
+
+
+@spec_state_test
+def test_no_double_or_surround(state):
+ attester_slashing = get_valid_attester_slashing(state, signed_1=False, signed_2=True)
+
+ attester_slashing.attestation_1.data.target_epoch += 1
+ sign_indexed_attestation(state, attester_slashing.attestation_1)
+
+ yield from run_attester_slashing_processing(state, attester_slashing, False)
+
+
+@spec_state_test
+def test_participants_already_slashed(state):
+ attester_slashing = get_valid_attester_slashing(state, signed_1=True, signed_2=True)
+
+ # set all indices to slashed
+ attestation_1 = attester_slashing.attestation_1
+ validator_indices = attestation_1.custody_bit_0_indices + attestation_1.custody_bit_1_indices
+ for index in validator_indices:
+ state.validator_registry[index].slashed = True
+
+ yield from run_attester_slashing_processing(state, attester_slashing, False)
+
+
+@spec_state_test
+def test_custody_bit_0_and_1(state):
+ attester_slashing = get_valid_attester_slashing(state, signed_1=False, signed_2=True)
+
+ attester_slashing.attestation_1.custody_bit_1_indices = (
+ attester_slashing.attestation_1.custody_bit_0_indices
+ )
+ sign_indexed_attestation(state, attester_slashing.attestation_1)
+
+ yield from run_attester_slashing_processing(state, attester_slashing, False)
diff --git a/test_libs/pyspec/eth2spec/test/block_processing/test_process_block_header.py b/test_libs/pyspec/eth2spec/test/block_processing/test_process_block_header.py
new file mode 100644
index 0000000000..8a67be7416
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/block_processing/test_process_block_header.py
@@ -0,0 +1,85 @@
+from copy import deepcopy
+
+from eth2spec.phase0.spec import (
+ get_beacon_proposer_index,
+ process_slots,
+ process_block_header,
+)
+from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls
+from eth2spec.test.helpers.block import (
+ build_empty_block_for_next_slot,
+ sign_block
+)
+from eth2spec.test.helpers.state import next_slot
+
+
+def prepare_state_for_header_processing(state):
+ process_slots(state, state.slot + 1)
+
+
+def run_block_header_processing(state, block, valid=True):
+ """
+ Run ``process_block_header``, yielding:
+ - pre-state ('pre')
+ - block ('block')
+ - post-state ('post').
+ If ``valid == False``, run expecting ``AssertionError``
+ """
+ prepare_state_for_header_processing(state)
+
+ yield 'pre', state
+ yield 'block', block
+
+ if not valid:
+ expect_assertion_error(lambda: process_block_header(state, block))
+ yield 'post', None
+ return
+
+ process_block_header(state, block)
+ yield 'post', state
+
+
+@spec_state_test
+def test_success_block_header(state):
+ block = build_empty_block_for_next_slot(state, signed=True)
+ yield from run_block_header_processing(state, block)
+
+
+@always_bls
+@spec_state_test
+def test_invalid_sig_block_header(state):
+ block = build_empty_block_for_next_slot(state)
+ yield from run_block_header_processing(state, block, valid=False)
+
+
+@spec_state_test
+def test_invalid_slot_block_header(state):
+ block = build_empty_block_for_next_slot(state)
+ block.slot = state.slot + 2 # invalid slot
+ sign_block(state, block)
+
+ yield from run_block_header_processing(state, block, valid=False)
+
+
+@spec_state_test
+def test_invalid_parent_root(state):
+ block = build_empty_block_for_next_slot(state)
+    block.parent_root = b'\12' * 32  # invalid parent root
+ sign_block(state, block)
+
+ yield from run_block_header_processing(state, block, valid=False)
+
+
+@spec_state_test
+def test_proposer_slashed(state):
+ # use stub state to get proposer index of next slot
+ stub_state = deepcopy(state)
+ next_slot(stub_state)
+ proposer_index = get_beacon_proposer_index(stub_state)
+
+ # set proposer to slashed
+ state.validator_registry[proposer_index].slashed = True
+
+ block = build_empty_block_for_next_slot(state, signed=True)
+
+ yield from run_block_header_processing(state, block, valid=False)
diff --git a/test_libs/pyspec/eth2spec/test/block_processing/test_process_deposit.py b/test_libs/pyspec/eth2spec/test/block_processing/test_process_deposit.py
new file mode 100644
index 0000000000..336af3bf73
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/block_processing/test_process_deposit.py
@@ -0,0 +1,124 @@
+import eth2spec.phase0.spec as spec
+from eth2spec.phase0.spec import process_deposit
+from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls
+from eth2spec.test.helpers.deposits import prepare_state_and_deposit, sign_deposit_data
+from eth2spec.test.helpers.state import get_balance
+from eth2spec.test.helpers.keys import privkeys
+
+
+def run_deposit_processing(state, deposit, validator_index, valid=True, effective=True):
+ """
+ Run ``process_deposit``, yielding:
+ - pre-state ('pre')
+ - deposit ('deposit')
+ - post-state ('post').
+ If ``valid == False``, run expecting ``AssertionError``
+ """
+ pre_validator_count = len(state.validator_registry)
+ pre_balance = 0
+ if validator_index < pre_validator_count:
+ pre_balance = get_balance(state, validator_index)
+ else:
+ # if it is a new validator, it should be right at the end of the current registry.
+ assert validator_index == pre_validator_count
+
+ yield 'pre', state
+ yield 'deposit', deposit
+
+ if not valid:
+ expect_assertion_error(lambda: process_deposit(state, deposit))
+ yield 'post', None
+ return
+
+ process_deposit(state, deposit)
+
+ yield 'post', state
+
+ if not effective:
+ assert len(state.validator_registry) == pre_validator_count
+ assert len(state.balances) == pre_validator_count
+ if validator_index < pre_validator_count:
+ assert get_balance(state, validator_index) == pre_balance
+ else:
+ if validator_index < pre_validator_count:
+ # top-up
+ assert len(state.validator_registry) == pre_validator_count
+ assert len(state.balances) == pre_validator_count
+ else:
+ # new validator
+ assert len(state.validator_registry) == pre_validator_count + 1
+ assert len(state.balances) == pre_validator_count + 1
+ assert get_balance(state, validator_index) == pre_balance + deposit.data.amount
+
+ assert state.deposit_index == state.latest_eth1_data.deposit_count
+
+
+@spec_state_test
+def test_new_deposit(state):
+ # fresh deposit = next validator index = validator appended to registry
+ validator_index = len(state.validator_registry)
+ amount = spec.MAX_EFFECTIVE_BALANCE
+ deposit = prepare_state_and_deposit(state, validator_index, amount, signed=True)
+
+ yield from run_deposit_processing(state, deposit, validator_index)
+
+
+@always_bls
+@spec_state_test
+def test_invalid_sig_new_deposit(state):
+ # fresh deposit = next validator index = validator appended to registry
+ validator_index = len(state.validator_registry)
+ amount = spec.MAX_EFFECTIVE_BALANCE
+ deposit = prepare_state_and_deposit(state, validator_index, amount)
+ yield from run_deposit_processing(state, deposit, validator_index, valid=True, effective=False)
+
+
+@spec_state_test
+def test_success_top_up(state):
+ validator_index = 0
+ amount = spec.MAX_EFFECTIVE_BALANCE // 4
+ deposit = prepare_state_and_deposit(state, validator_index, amount, signed=True)
+
+ yield from run_deposit_processing(state, deposit, validator_index)
+
+
+@always_bls
+@spec_state_test
+def test_invalid_sig_top_up(state):
+ validator_index = 0
+ amount = spec.MAX_EFFECTIVE_BALANCE // 4
+ deposit = prepare_state_and_deposit(state, validator_index, amount)
+
+ # invalid signatures, in top-ups, are allowed!
+ yield from run_deposit_processing(state, deposit, validator_index, valid=True, effective=True)
+
+
+@spec_state_test
+def test_wrong_index(state):
+ validator_index = len(state.validator_registry)
+ amount = spec.MAX_EFFECTIVE_BALANCE
+ deposit = prepare_state_and_deposit(state, validator_index, amount)
+
+ # mess up deposit_index
+ deposit.index = state.deposit_index + 1
+
+ sign_deposit_data(state, deposit.data, privkeys[validator_index])
+
+ yield from run_deposit_processing(state, deposit, validator_index, valid=False)
+
+
+# TODO: test invalid signature
+
+
+@spec_state_test
+def test_bad_merkle_proof(state):
+ validator_index = len(state.validator_registry)
+ amount = spec.MAX_EFFECTIVE_BALANCE
+ deposit = prepare_state_and_deposit(state, validator_index, amount)
+
+ # mess up merkle branch
+ deposit.proof[-1] = spec.ZERO_HASH
+
+ sign_deposit_data(state, deposit.data, privkeys[validator_index])
+
+ yield from run_deposit_processing(state, deposit, validator_index, valid=False)
diff --git a/test_libs/pyspec/eth2spec/test/block_processing/test_process_proposer_slashing.py b/test_libs/pyspec/eth2spec/test/block_processing/test_process_proposer_slashing.py
new file mode 100644
index 0000000000..07ccc25f1c
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/block_processing/test_process_proposer_slashing.py
@@ -0,0 +1,137 @@
+import eth2spec.phase0.spec as spec
+from eth2spec.phase0.spec import (
+ get_current_epoch,
+ process_proposer_slashing,
+)
+from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls
+from eth2spec.test.helpers.block_header import sign_block_header
+from eth2spec.test.helpers.keys import privkeys
+from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing
+from eth2spec.test.helpers.state import get_balance
+
+
+def run_proposer_slashing_processing(state, proposer_slashing, valid=True):
+ """
+ Run ``process_proposer_slashing``, yielding:
+ - pre-state ('pre')
+ - proposer_slashing ('proposer_slashing')
+ - post-state ('post').
+ If ``valid == False``, run expecting ``AssertionError``
+ """
+
+ yield 'pre', state
+ yield 'proposer_slashing', proposer_slashing
+
+ if not valid:
+ expect_assertion_error(lambda: process_proposer_slashing(state, proposer_slashing))
+ yield 'post', None
+ return
+
+ pre_proposer_balance = get_balance(state, proposer_slashing.proposer_index)
+
+ process_proposer_slashing(state, proposer_slashing)
+ yield 'post', state
+
+ # check if slashed
+ slashed_validator = state.validator_registry[proposer_slashing.proposer_index]
+ assert slashed_validator.slashed
+ assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
+ assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
+
+ # lost whistleblower reward
+ assert (
+ get_balance(state, proposer_slashing.proposer_index) <
+ pre_proposer_balance
+ )
+
+
+@spec_state_test
+def test_success(state):
+ proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=True)
+
+ yield from run_proposer_slashing_processing(state, proposer_slashing)
+
+
+@always_bls
+@spec_state_test
+def test_invalid_sig_1(state):
+ proposer_slashing = get_valid_proposer_slashing(state, signed_1=False, signed_2=True)
+ yield from run_proposer_slashing_processing(state, proposer_slashing, False)
+
+
+@always_bls
+@spec_state_test
+def test_invalid_sig_2(state):
+ proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=False)
+ yield from run_proposer_slashing_processing(state, proposer_slashing, False)
+
+
+@always_bls
+@spec_state_test
+def test_invalid_sig_1_and_2(state):
+ proposer_slashing = get_valid_proposer_slashing(state, signed_1=False, signed_2=False)
+ yield from run_proposer_slashing_processing(state, proposer_slashing, False)
+
+
+@spec_state_test
+def test_invalid_proposer_index(state):
+ proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=True)
+ # Index just too high (by 1)
+ proposer_slashing.proposer_index = len(state.validator_registry)
+
+ yield from run_proposer_slashing_processing(state, proposer_slashing, False)
+
+
+@spec_state_test
+def test_epochs_are_different(state):
+ proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=False)
+
+ # set slots to be in different epochs
+ proposer_slashing.header_2.slot += spec.SLOTS_PER_EPOCH
+ sign_block_header(state, proposer_slashing.header_2, privkeys[proposer_slashing.proposer_index])
+
+ yield from run_proposer_slashing_processing(state, proposer_slashing, False)
+
+
+@spec_state_test
+def test_headers_are_same(state):
+ proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=False)
+
+ # set headers to be the same
+ proposer_slashing.header_2 = proposer_slashing.header_1
+
+ yield from run_proposer_slashing_processing(state, proposer_slashing, False)
+
+
+@spec_state_test
+def test_proposer_is_not_activated(state):
+ proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=True)
+
+ # set proposer to be not active yet
+ state.validator_registry[proposer_slashing.proposer_index].activation_epoch = get_current_epoch(state) + 1
+
+ yield from run_proposer_slashing_processing(state, proposer_slashing, False)
+
+
+@spec_state_test
+def test_proposer_is_slashed(state):
+ proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=True)
+
+ # set proposer to slashed
+ state.validator_registry[proposer_slashing.proposer_index].slashed = True
+
+ yield from run_proposer_slashing_processing(state, proposer_slashing, False)
+
+
+@spec_state_test
+def test_proposer_is_withdrawn(state):
+ proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=True)
+
+ # move 1 epoch into future, to allow for past withdrawable epoch
+ state.slot += spec.SLOTS_PER_EPOCH
+ # set proposer withdrawable_epoch in past
+ current_epoch = get_current_epoch(state)
+ proposer_index = proposer_slashing.proposer_index
+ state.validator_registry[proposer_index].withdrawable_epoch = current_epoch - 1
+
+ yield from run_proposer_slashing_processing(state, proposer_slashing, False)
diff --git a/test_libs/pyspec/eth2spec/test/block_processing/test_process_transfer.py b/test_libs/pyspec/eth2spec/test/block_processing/test_process_transfer.py
new file mode 100644
index 0000000000..83af755743
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/block_processing/test_process_transfer.py
@@ -0,0 +1,172 @@
+import eth2spec.phase0.spec as spec
+from eth2spec.phase0.spec import (
+ get_active_validator_indices,
+ get_beacon_proposer_index,
+ get_current_epoch,
+ process_transfer,
+)
+from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls
+from eth2spec.test.helpers.state import next_epoch
+from eth2spec.test.helpers.block import apply_empty_block
+from eth2spec.test.helpers.transfers import get_valid_transfer
+
+
+def run_transfer_processing(state, transfer, valid=True):
+ """
+ Run ``process_transfer``, yielding:
+ - pre-state ('pre')
+ - transfer ('transfer')
+ - post-state ('post').
+ If ``valid == False``, run expecting ``AssertionError``
+ """
+
+ proposer_index = get_beacon_proposer_index(state)
+ pre_transfer_sender_balance = state.balances[transfer.sender]
+ pre_transfer_recipient_balance = state.balances[transfer.recipient]
+ pre_transfer_proposer_balance = state.balances[proposer_index]
+
+ yield 'pre', state
+ yield 'transfer', transfer
+
+ if not valid:
+ expect_assertion_error(lambda: process_transfer(state, transfer))
+ yield 'post', None
+ return
+
+ process_transfer(state, transfer)
+ yield 'post', state
+
+ sender_balance = state.balances[transfer.sender]
+ recipient_balance = state.balances[transfer.recipient]
+ assert sender_balance == pre_transfer_sender_balance - transfer.amount - transfer.fee
+ assert recipient_balance == pre_transfer_recipient_balance + transfer.amount
+ assert state.balances[proposer_index] == pre_transfer_proposer_balance + transfer.fee
+
+
+@spec_state_test
+def test_success_non_activated(state):
+ transfer = get_valid_transfer(state, signed=True)
+ # un-activate so validator can transfer
+ state.validator_registry[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
+
+ yield from run_transfer_processing(state, transfer)
+
+
+@spec_state_test
+def test_success_withdrawable(state):
+ next_epoch(state)
+ apply_empty_block(state)
+
+ transfer = get_valid_transfer(state, signed=True)
+
+ # withdrawable_epoch in past so can transfer
+ state.validator_registry[transfer.sender].withdrawable_epoch = get_current_epoch(state) - 1
+
+ yield from run_transfer_processing(state, transfer)
+
+
+@spec_state_test
+def test_success_active_above_max_effective(state):
+ sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1]
+ state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + 1
+ transfer = get_valid_transfer(state, sender_index=sender_index, amount=1, fee=0, signed=True)
+
+ yield from run_transfer_processing(state, transfer)
+
+
+@spec_state_test
+def test_success_active_above_max_effective_fee(state):
+ sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1]
+ state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + 1
+ transfer = get_valid_transfer(state, sender_index=sender_index, amount=0, fee=1, signed=True)
+
+ yield from run_transfer_processing(state, transfer)
+
+
+@always_bls
+@spec_state_test
+def test_invalid_signature(state):
+ transfer = get_valid_transfer(state)
+ # un-activate so validator can transfer
+ state.validator_registry[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
+
+ yield from run_transfer_processing(state, transfer, False)
+
+
+@spec_state_test
+def test_active_but_transfer_past_effective_balance(state):
+ sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1]
+ amount = spec.MAX_EFFECTIVE_BALANCE // 32
+ state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE
+ transfer = get_valid_transfer(state, sender_index=sender_index, amount=amount, fee=0, signed=True)
+
+ yield from run_transfer_processing(state, transfer, False)
+
+
+@spec_state_test
+def test_incorrect_slot(state):
+ transfer = get_valid_transfer(state, slot=state.slot + 1, signed=True)
+ # un-activate so validator can transfer
+ state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
+
+ yield from run_transfer_processing(state, transfer, False)
+
+
+@spec_state_test
+def test_insufficient_balance_for_fee(state):
+ sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1]
+ state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE
+ transfer = get_valid_transfer(state, sender_index=sender_index, amount=0, fee=1, signed=True)
+
+ # un-activate so validator can transfer
+ state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
+
+ yield from run_transfer_processing(state, transfer, False)
+
+
+@spec_state_test
+def test_insufficient_balance(state):
+ sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1]
+ state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE
+ transfer = get_valid_transfer(state, sender_index=sender_index, amount=1, fee=0, signed=True)
+
+ # un-activate so validator can transfer
+ state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
+
+ yield from run_transfer_processing(state, transfer, False)
+
+
+@spec_state_test
+def test_no_dust_sender(state):
+ sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1]
+ balance = state.balances[sender_index]
+ transfer = get_valid_transfer(state, sender_index=sender_index, amount=balance - spec.MIN_DEPOSIT_AMOUNT + 1, fee=0, signed=True)
+
+ # un-activate so validator can transfer
+ state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
+
+ yield from run_transfer_processing(state, transfer, False)
+
+
+@spec_state_test
+def test_no_dust_recipient(state):
+ sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1]
+ state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + 1
+ transfer = get_valid_transfer(state, sender_index=sender_index, amount=1, fee=0, signed=True)
+ state.balances[transfer.recipient] = 0
+
+ # un-activate so validator can transfer
+ state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
+
+ yield from run_transfer_processing(state, transfer, False)
+
+
+@spec_state_test
+def test_invalid_pubkey(state):
+ transfer = get_valid_transfer(state, signed=True)
+ state.validator_registry[transfer.sender].withdrawal_credentials = spec.ZERO_HASH
+
+ # un-activate so validator can transfer
+ state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
+
+ yield from run_transfer_processing(state, transfer, False)
diff --git a/test_libs/pyspec/eth2spec/test/block_processing/test_process_voluntary_exit.py b/test_libs/pyspec/eth2spec/test/block_processing/test_process_voluntary_exit.py
new file mode 100644
index 0000000000..53fb4e3f7c
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/block_processing/test_process_voluntary_exit.py
@@ -0,0 +1,225 @@
+import eth2spec.phase0.spec as spec
+from eth2spec.phase0.spec import (
+ get_active_validator_indices,
+ get_churn_limit,
+ get_current_epoch,
+ process_voluntary_exit,
+)
+from eth2spec.test.context import spec_state_test, expect_assertion_error, always_bls
+from eth2spec.test.helpers.keys import pubkey_to_privkey
+from eth2spec.test.helpers.voluntary_exits import build_voluntary_exit, sign_voluntary_exit
+
+
+def run_voluntary_exit_processing(state, voluntary_exit, valid=True):
+ """
+ Run ``process_voluntary_exit``, yielding:
+ - pre-state ('pre')
+ - voluntary_exit ('voluntary_exit')
+ - post-state ('post').
+ If ``valid == False``, run expecting ``AssertionError``
+ """
+ validator_index = voluntary_exit.validator_index
+
+ yield 'pre', state
+ yield 'voluntary_exit', voluntary_exit
+
+ if not valid:
+ expect_assertion_error(lambda: process_voluntary_exit(state, voluntary_exit))
+ yield 'post', None
+ return
+
+ pre_exit_epoch = state.validator_registry[validator_index].exit_epoch
+
+ process_voluntary_exit(state, voluntary_exit)
+
+ yield 'post', state
+
+ assert pre_exit_epoch == spec.FAR_FUTURE_EPOCH
+ assert state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
+
+
+@spec_state_test
+def test_success(state):
+ # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit
+ state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
+
+ current_epoch = get_current_epoch(state)
+ validator_index = get_active_validator_indices(state, current_epoch)[0]
+ privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
+
+ voluntary_exit = build_voluntary_exit(state, current_epoch, validator_index, privkey, signed=True)
+
+ yield from run_voluntary_exit_processing(state, voluntary_exit)
+
+
+@always_bls
+@spec_state_test
+def test_invalid_signature(state):
+ # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit
+ state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
+
+ current_epoch = get_current_epoch(state)
+ validator_index = get_active_validator_indices(state, current_epoch)[0]
+ privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
+
+ voluntary_exit = build_voluntary_exit(state, current_epoch, validator_index, privkey)
+
+ yield from run_voluntary_exit_processing(state, voluntary_exit, False)
+
+
+@spec_state_test
+def test_success_exit_queue(state):
+ # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit
+ state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
+
+ current_epoch = get_current_epoch(state)
+
+    # exit the full churn limit of validators in the current epoch
+ initial_indices = get_active_validator_indices(state, current_epoch)[:get_churn_limit(state)]
+
+ # Prepare a bunch of exits, based on the current state
+ exit_queue = []
+ for index in initial_indices:
+ privkey = pubkey_to_privkey[state.validator_registry[index].pubkey]
+ exit_queue.append(build_voluntary_exit(
+ state,
+ current_epoch,
+ index,
+ privkey,
+ signed=True,
+ ))
+
+ # Now run all the exits
+ for voluntary_exit in exit_queue:
+ # the function yields data, but here we are only interested in running it; ignore the yields.
+ for _ in run_voluntary_exit_processing(state, voluntary_exit):
+ continue
+
+ # exit an additional validator
+ validator_index = get_active_validator_indices(state, current_epoch)[-1]
+ privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
+ voluntary_exit = build_voluntary_exit(
+ state,
+ current_epoch,
+ validator_index,
+ privkey,
+ signed=True,
+ )
+
+ # This is the interesting part of the test: on a pre-state with a full exit queue,
+ # processing an additional exit results in an exit scheduled for a later epoch.
+ yield from run_voluntary_exit_processing(state, voluntary_exit)
+
+ assert (
+ state.validator_registry[validator_index].exit_epoch ==
+ state.validator_registry[initial_indices[0]].exit_epoch + 1
+ )
+
+
+@spec_state_test
+def test_validator_exit_in_future(state):
+ # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit
+ state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
+
+ current_epoch = get_current_epoch(state)
+ validator_index = get_active_validator_indices(state, current_epoch)[0]
+ privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
+
+ voluntary_exit = build_voluntary_exit(
+ state,
+ current_epoch,
+ validator_index,
+ privkey,
+ signed=False,
+ )
+ voluntary_exit.epoch += 1
+ sign_voluntary_exit(state, voluntary_exit, privkey)
+
+ yield from run_voluntary_exit_processing(state, voluntary_exit, False)
+
+
+@spec_state_test
+def test_validator_invalid_validator_index(state):
+ # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit
+ state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
+
+ current_epoch = get_current_epoch(state)
+ validator_index = get_active_validator_indices(state, current_epoch)[0]
+ privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
+
+ voluntary_exit = build_voluntary_exit(
+ state,
+ current_epoch,
+ validator_index,
+ privkey,
+ signed=False,
+ )
+ voluntary_exit.validator_index = len(state.validator_registry)
+ sign_voluntary_exit(state, voluntary_exit, privkey)
+
+ yield from run_voluntary_exit_processing(state, voluntary_exit, False)
+
+
+@spec_state_test
+def test_validator_not_active(state):
+ current_epoch = get_current_epoch(state)
+ validator_index = get_active_validator_indices(state, current_epoch)[0]
+ privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
+
+ state.validator_registry[validator_index].activation_epoch = spec.FAR_FUTURE_EPOCH
+
+ # build and test voluntary exit
+ voluntary_exit = build_voluntary_exit(
+ state,
+ current_epoch,
+ validator_index,
+ privkey,
+ signed=True,
+ )
+
+ yield from run_voluntary_exit_processing(state, voluntary_exit, False)
+
+
+@spec_state_test
+def test_validator_already_exited(state):
+ # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow the validator to exit
+ state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
+
+ current_epoch = get_current_epoch(state)
+ validator_index = get_active_validator_indices(state, current_epoch)[0]
+ privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
+
+ # but the validator has already exited
+ state.validator_registry[validator_index].exit_epoch = current_epoch + 2
+
+ voluntary_exit = build_voluntary_exit(
+ state,
+ current_epoch,
+ validator_index,
+ privkey,
+ signed=True,
+ )
+
+ yield from run_voluntary_exit_processing(state, voluntary_exit, False)
+
+
+@spec_state_test
+def test_validator_not_active_long_enough(state):
+ current_epoch = get_current_epoch(state)
+ validator_index = get_active_validator_indices(state, current_epoch)[0]
+ privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
+
+ voluntary_exit = build_voluntary_exit(
+ state,
+ current_epoch,
+ validator_index,
+ privkey,
+ signed=True,
+ )
+
+ assert (
+ current_epoch - state.validator_registry[validator_index].activation_epoch <
+ spec.PERSISTENT_COMMITTEE_PERIOD
+ )
+
+ yield from run_voluntary_exit_processing(state, voluntary_exit, False)
diff --git a/test_libs/pyspec/eth2spec/test/conftest.py b/test_libs/pyspec/eth2spec/test/conftest.py
new file mode 100644
index 0000000000..dadb0d5d06
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/conftest.py
@@ -0,0 +1,36 @@
+from eth2spec.phase0 import spec
+
+# We import pytest only when it's present, i.e. when we are running tests.
+# The test-cases themselves can be generated without installing pytest.
+
+def module_exists(module_name):
+ try:
+ __import__(module_name)
+ except ImportError:
+ return False
+ else:
+ return True
+
+
+def fixture(*args, **kwargs):
+ if module_exists("pytest"):
+ import pytest
+ return pytest.fixture(*args, **kwargs)
+ else:
+ def ignore():
+ pass
+ return ignore
+
+
+def pytest_addoption(parser):
+ parser.addoption(
+ "--config", action="store", default="minimal", help="config: make the pyspec use the specified configuration"
+ )
+
+
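+# Autouse fixture: load the selected constants preset into the spec before each test runs.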
+@fixture(autouse=True)
+def config(request):
+ config_name = request.config.getoption("--config")
+ from preset_loader import loader
+ presets = loader.load_presets('../../configs/', config_name)
+ spec.apply_constants_preset(presets)
diff --git a/test_libs/pyspec/eth2spec/test/context.py b/test_libs/pyspec/eth2spec/test/context.py
new file mode 100644
index 0000000000..2be9322de2
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/context.py
@@ -0,0 +1,82 @@
+from eth2spec.phase0 import spec
+from eth2spec.utils import bls
+
+from .helpers.genesis import create_genesis_state
+
+from .utils import spectest, with_args, with_tags
+
+# Provides a genesis state as the first argument to the decorated function
+with_state = with_args(lambda: [create_genesis_state(spec.SLOTS_PER_EPOCH * 8)])
+
+
+# BLS is turned off by default *for performance purposes during TESTING*.
+# The runner of the test can indicate the preferred setting (test generators prefer BLS to be ON).
+# - Some tests are marked as BLS-requiring, and ignore this setting.
+# (tests that express differences caused by BLS, e.g. invalid signatures being rejected)
+# - Some other tests are marked as BLS-ignoring, and ignore this setting.
+# (tests that are heavily performance impacted / require unsigned state transitions)
+# - Most tests respect the BLS setting.
+DEFAULT_BLS_ACTIVE = False
+
+
+# shorthand for decorating @with_state @spectest()
+def spec_state_test(fn):
+ return with_state(bls_switch(spectest()(fn)))
+
+
+def expect_assertion_error(fn):
+ bad = False
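+ # note: do not raise from within the try block, the AssertionError would be swallowed by the except clauses below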
+ try:
+ fn()
+ bad = True
+ except AssertionError:
+ pass
+ except IndexError:
+ # Index errors are special; the spec is not explicit on bounds checking, so an IndexError is treated like a failed assert.
+ pass
+ if bad:
+ raise AssertionError('expected an assertion error, but got none.')
+
+
+# Tags a test as requiring BLS to be ignored for it to pass.
+bls_ignored = with_tags({'bls_setting': 2})
+
+
+def never_bls(fn):
+ """
+ Decorator to apply on top of the ``bls_switch`` decorator to force BLS de-activation. Useful to mark tests as BLS-ignoring.
+ """
+ def entry(*args, **kw):
+ # override bls setting
+ kw['bls_active'] = False
+ return fn(*args, **kw)
+ return bls_ignored(entry)
+
+
+# Tags a test as requiring BLS for it to pass.
+bls_required = with_tags({'bls_setting': 1})
+
+
+def always_bls(fn):
+ """
+ Decorator to apply on top of the ``bls_switch`` decorator to force BLS activation. Useful to mark tests as BLS-requiring.
+ """
+ def entry(*args, **kw):
+ # override bls setting
+ kw['bls_active'] = True
+ return fn(*args, **kw)
+ return bls_required(entry)
+
+
+def bls_switch(fn):
+ """
+ Decorator to make a function execute with BLS ON, or BLS off.
+ Based on an optional bool argument ``bls_active``, passed to the function at runtime.
+ """
+ def entry(*args, **kw):
+ old_state = bls.bls_active
+ bls.bls_active = kw.pop('bls_active', DEFAULT_BLS_ACTIVE)
+ out = fn(*args, **kw)
+ bls.bls_active = old_state
+ return out
+ return entry
diff --git a/test_libs/pyspec/eth2spec/test/epoch_processing/__init__.py b/test_libs/pyspec/eth2spec/test/epoch_processing/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test_libs/pyspec/tests/epoch_processing/test_process_crosslinks.py b/test_libs/pyspec/eth2spec/test/epoch_processing/test_process_crosslinks.py
similarity index 62%
rename from test_libs/pyspec/tests/epoch_processing/test_process_crosslinks.py
rename to test_libs/pyspec/eth2spec/test/epoch_processing/test_process_crosslinks.py
index 347a507efe..bb308a9f08 100644
--- a/test_libs/pyspec/tests/epoch_processing/test_process_crosslinks.py
+++ b/test_libs/pyspec/eth2spec/test/epoch_processing/test_process_crosslinks.py
@@ -1,5 +1,4 @@
from copy import deepcopy
-import pytest
import eth2spec.phase0.spec as spec
@@ -9,106 +8,123 @@
process_crosslinks,
state_transition,
)
-from tests.helpers import (
+from eth2spec.test.context import spec_state_test
+from eth2spec.test.helpers.state import (
+ next_epoch,
+ next_slot
+)
+from eth2spec.test.helpers.block import apply_empty_block, sign_block
+from eth2spec.test.helpers.attestations import (
add_attestation_to_state,
build_empty_block_for_next_slot,
fill_aggregate_attestation,
get_crosslink_committee,
get_valid_attestation,
- next_epoch,
- next_slot,
- set_bitfield_bit,
+ sign_attestation,
)
-# mark entire file as 'crosslinks'
-pytestmark = pytest.mark.crosslinks
-
-
def run_process_crosslinks(state, valid=True):
+ """
+ Run ``process_crosslinks``, yielding:
+ - pre-state ('pre')
+ - post-state ('post').
+ If ``valid == False``, run expecting ``AssertionError``
+ """
# transition state to slot before state transition
slot = state.slot + (spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH) - 1
block = build_empty_block_for_next_slot(state)
block.slot = slot
+ sign_block(state, block)
state_transition(state, block)
# cache state before epoch transition
process_slot(state)
- post_state = deepcopy(state)
- process_crosslinks(post_state)
-
- return state, post_state
+ yield 'pre', state
+ process_crosslinks(state)
+ yield 'post', state
+@spec_state_test
def test_no_attestations(state):
- pre_state, post_state = run_process_crosslinks(state)
+ yield from run_process_crosslinks(state)
for shard in range(spec.SHARD_COUNT):
- assert post_state.previous_crosslinks[shard] == post_state.current_crosslinks[shard]
-
- return pre_state, post_state
+ assert state.previous_crosslinks[shard] == state.current_crosslinks[shard]
+@spec_state_test
def test_single_crosslink_update_from_current_epoch(state):
next_epoch(state)
- attestation = get_valid_attestation(state)
+ attestation = get_valid_attestation(state, signed=True)
fill_aggregate_attestation(state, attestation)
add_attestation_to_state(state, attestation, state.slot + spec.MIN_ATTESTATION_INCLUSION_DELAY)
assert len(state.current_epoch_attestations) == 1
- pre_state, post_state = run_process_crosslinks(state)
-
shard = attestation.data.crosslink.shard
- assert post_state.previous_crosslinks[shard] != post_state.current_crosslinks[shard]
- assert pre_state.current_crosslinks[shard] != post_state.current_crosslinks[shard]
+ pre_crosslink = deepcopy(state.current_crosslinks[shard])
- return pre_state, post_state
+ yield from run_process_crosslinks(state)
+ assert state.previous_crosslinks[shard] != state.current_crosslinks[shard]
+ assert pre_crosslink != state.current_crosslinks[shard]
+
+@spec_state_test
def test_single_crosslink_update_from_previous_epoch(state):
next_epoch(state)
- attestation = get_valid_attestation(state)
+ attestation = get_valid_attestation(state, signed=True)
fill_aggregate_attestation(state, attestation)
add_attestation_to_state(state, attestation, state.slot + spec.SLOTS_PER_EPOCH)
assert len(state.previous_epoch_attestations) == 1
- pre_state, post_state = run_process_crosslinks(state)
+ shard = attestation.data.crosslink.shard
+ pre_crosslink = deepcopy(state.current_crosslinks[shard])
+
crosslink_deltas = get_crosslink_deltas(state)
- shard = attestation.data.crosslink.shard
- assert post_state.previous_crosslinks[shard] != post_state.current_crosslinks[shard]
- assert pre_state.current_crosslinks[shard] != post_state.current_crosslinks[shard]
+ yield from run_process_crosslinks(state)
+
+ assert state.previous_crosslinks[shard] != state.current_crosslinks[shard]
+ assert pre_crosslink != state.current_crosslinks[shard]
+
# ensure rewarded
for index in get_crosslink_committee(state, attestation.data.target_epoch, attestation.data.crosslink.shard):
assert crosslink_deltas[0][index] > 0
assert crosslink_deltas[1][index] == 0
- return pre_state, post_state
-
+@spec_state_test
def test_double_late_crosslink(state):
+ if spec.get_epoch_committee_count(state, spec.get_current_epoch(state)) < spec.SHARD_COUNT:
+ print("warning: ignoring test, test-assumptions are incompatible with configuration")
+ return
+
next_epoch(state)
state.slot += 4
- attestation_1 = get_valid_attestation(state)
+ attestation_1 = get_valid_attestation(state, signed=True)
fill_aggregate_attestation(state, attestation_1)
- # add attestation_1 in the next epoch
+ # add attestation_1 to the next epoch
next_epoch(state)
add_attestation_to_state(state, attestation_1, state.slot + 1)
for slot in range(spec.SLOTS_PER_EPOCH):
attestation_2 = get_valid_attestation(state)
if attestation_2.data.crosslink.shard == attestation_1.data.crosslink.shard:
+ sign_attestation(state, attestation_2)
break
next_slot(state)
+ apply_empty_block(state)
+
fill_aggregate_attestation(state, attestation_2)
# add attestation_2 in the next epoch after attestation_1 has
@@ -119,16 +135,15 @@ def test_double_late_crosslink(state):
assert len(state.previous_epoch_attestations) == 1
assert len(state.current_epoch_attestations) == 0
- pre_state, post_state = run_process_crosslinks(state)
crosslink_deltas = get_crosslink_deltas(state)
+ yield from run_process_crosslinks(state)
+
shard = attestation_2.data.crosslink.shard
# ensure that the current crosslinks were not updated by the second attestation
- assert post_state.previous_crosslinks[shard] == post_state.current_crosslinks[shard]
+ assert state.previous_crosslinks[shard] == state.current_crosslinks[shard]
# ensure no reward, only penalties for the failed crosslink
for index in get_crosslink_committee(state, attestation_2.data.target_epoch, attestation_2.data.crosslink.shard):
assert crosslink_deltas[0][index] == 0
assert crosslink_deltas[1][index] > 0
-
- return pre_state, post_state
diff --git a/test_libs/pyspec/tests/epoch_processing/test_process_registry_updates.py b/test_libs/pyspec/eth2spec/test/epoch_processing/test_process_registry_updates.py
similarity index 54%
rename from test_libs/pyspec/tests/epoch_processing/test_process_registry_updates.py
rename to test_libs/pyspec/eth2spec/test/epoch_processing/test_process_registry_updates.py
index 11f5de2ad4..8f6e350885 100644
--- a/test_libs/pyspec/tests/epoch_processing/test_process_registry_updates.py
+++ b/test_libs/pyspec/eth2spec/test/epoch_processing/test_process_registry_updates.py
@@ -1,21 +1,44 @@
-from copy import deepcopy
-
-import pytest
-
import eth2spec.phase0.spec as spec
from eth2spec.phase0.spec import (
get_current_epoch,
is_active_validator,
+ process_registry_updates
)
-from tests.helpers import (
- next_epoch,
-)
-
-# mark entire file as 'state'
-pytestmark = pytest.mark.state
-
-
+from eth2spec.phase0.spec import state_transition
+from eth2spec.test.helpers.block import build_empty_block_for_next_slot, sign_block
+from eth2spec.test.helpers.state import next_epoch
+from eth2spec.test.context import spec_state_test
+
+
+def run_process_registry_updates(state, valid=True):
+ """
+ Run ``process_registry_updates``, yielding:
+ - pre-state ('pre')
+ - post-state ('post').
+ If ``valid == False``, run expecting ``AssertionError``
+ """
+ # transition state to slot before state transition
+ slot = state.slot + (spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH) - 1
+ block = build_empty_block_for_next_slot(state)
+ block.slot = slot
+ sign_block(state, block)
+ state_transition(state, block)
+
+ # cache state before epoch transition
+ spec.process_slot(state)
+
+ # process components of epoch transition before registry update
+ spec.process_justification_and_finalization(state)
+ spec.process_crosslinks(state)
+ spec.process_rewards_and_penalties(state)
+
+ yield 'pre', state
+ process_registry_updates(state)
+ yield 'post', state
+
+
+@spec_state_test
def test_activation(state):
index = 0
assert is_active_validator(state.validator_registry[index], get_current_epoch(state))
@@ -26,12 +49,10 @@ def test_activation(state):
state.validator_registry[index].effective_balance = spec.MAX_EFFECTIVE_BALANCE
assert not is_active_validator(state.validator_registry[index], get_current_epoch(state))
- pre_state = deepcopy(state)
-
- blocks = []
for _ in range(spec.ACTIVATION_EXIT_DELAY + 1):
- block = next_epoch(state)
- blocks.append(block)
+ next_epoch(state)
+
+ yield from run_process_registry_updates(state)
assert state.validator_registry[index].activation_eligibility_epoch != spec.FAR_FUTURE_EPOCH
assert state.validator_registry[index].activation_epoch != spec.FAR_FUTURE_EPOCH
@@ -40,9 +61,8 @@ def test_activation(state):
get_current_epoch(state),
)
- return pre_state, blocks, state
-
+@spec_state_test
def test_ejection(state):
index = 0
assert is_active_validator(state.validator_registry[index], get_current_epoch(state))
@@ -51,17 +71,13 @@ def test_ejection(state):
# Mock an ejection
state.validator_registry[index].effective_balance = spec.EJECTION_BALANCE
- pre_state = deepcopy(state)
-
- blocks = []
for _ in range(spec.ACTIVATION_EXIT_DELAY + 1):
- block = next_epoch(state)
- blocks.append(block)
+ next_epoch(state)
+
+ yield from run_process_registry_updates(state)
assert state.validator_registry[index].exit_epoch != spec.FAR_FUTURE_EPOCH
assert not is_active_validator(
state.validator_registry[index],
get_current_epoch(state),
)
-
- return pre_state, blocks, state
diff --git a/test_libs/pyspec/eth2spec/test/helpers/__init__.py b/test_libs/pyspec/eth2spec/test/helpers/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test_libs/pyspec/eth2spec/test/helpers/attestations.py b/test_libs/pyspec/eth2spec/test/helpers/attestations.py
new file mode 100644
index 0000000000..6ac0b994eb
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/helpers/attestations.py
@@ -0,0 +1,161 @@
+from typing import List
+
+# Access constants from spec pkg reference.
+import eth2spec.phase0.spec as spec
+from eth2spec.phase0.spec import (
+ Attestation,
+ AttestationData,
+ AttestationDataAndCustodyBit,
+ Crosslink,
+ get_epoch_start_slot, get_block_root, get_current_epoch, get_previous_epoch, slot_to_epoch,
+ get_crosslink_committee, get_domain, IndexedAttestation, get_attesting_indices, BeaconState, get_block_root_at_slot,
+ get_epoch_start_shard, get_epoch_committee_count,
+ state_transition, process_slots,
+)
+from eth2spec.test.helpers.bitfields import set_bitfield_bit
+from eth2spec.test.helpers.block import build_empty_block_for_next_slot, sign_block
+from eth2spec.test.helpers.keys import privkeys
+from eth2spec.utils.bls import bls_sign, bls_aggregate_signatures
+from eth2spec.utils.minimal_ssz import hash_tree_root
+
+
+def build_attestation_data(state, slot, shard):
+ assert state.slot >= slot
+
+ if slot == state.slot:
+ block_root = build_empty_block_for_next_slot(state).parent_root
+ else:
+ block_root = get_block_root_at_slot(state, slot)
+
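+ # the target (epoch boundary) root is the block root at the start of the attestation's epoch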
+ current_epoch_start_slot = get_epoch_start_slot(get_current_epoch(state))
+ if slot < current_epoch_start_slot:
+ epoch_boundary_root = get_block_root(state, get_previous_epoch(state))
+ elif slot == current_epoch_start_slot:
+ epoch_boundary_root = block_root
+ else:
+ epoch_boundary_root = get_block_root(state, get_current_epoch(state))
+
+ if slot < current_epoch_start_slot:
+ justified_epoch = state.previous_justified_epoch
+ justified_block_root = state.previous_justified_root
+ else:
+ justified_epoch = state.current_justified_epoch
+ justified_block_root = state.current_justified_root
+
+ if slot_to_epoch(slot) == get_current_epoch(state):
+ parent_crosslink = state.current_crosslinks[shard]
+ else:
+ parent_crosslink = state.previous_crosslinks[shard]
+
+ return AttestationData(
+ beacon_block_root=block_root,
+ source_epoch=justified_epoch,
+ source_root=justified_block_root,
+ target_epoch=slot_to_epoch(slot),
+ target_root=epoch_boundary_root,
+ crosslink=Crosslink(
+ shard=shard,
+ start_epoch=parent_crosslink.end_epoch,
+ end_epoch=min(slot_to_epoch(slot), parent_crosslink.end_epoch + spec.MAX_EPOCHS_PER_CROSSLINK),
+ data_root=spec.ZERO_HASH,
+ parent_root=hash_tree_root(parent_crosslink),
+ ),
+ )
+
+
+def get_valid_attestation(state, slot=None, signed=False):
+ if slot is None:
+ slot = state.slot
+
+ epoch = slot_to_epoch(slot)
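+ # derive the shard assigned to the first committee attesting at this slot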
+ epoch_start_shard = get_epoch_start_shard(state, epoch)
+ committees_per_slot = get_epoch_committee_count(state, epoch) // spec.SLOTS_PER_EPOCH
+ shard = (epoch_start_shard + committees_per_slot * (slot % spec.SLOTS_PER_EPOCH)) % spec.SHARD_COUNT
+
+ attestation_data = build_attestation_data(state, slot, shard)
+
+ crosslink_committee = get_crosslink_committee(
+ state,
+ attestation_data.target_epoch,
+ attestation_data.crosslink.shard
+ )
+
+ committee_size = len(crosslink_committee)
+ bitfield_length = (committee_size + 7) // 8
+ aggregation_bitfield = b'\x00' * bitfield_length
+ custody_bitfield = b'\x00' * bitfield_length
+ attestation = Attestation(
+ aggregation_bitfield=aggregation_bitfield,
+ data=attestation_data,
+ custody_bitfield=custody_bitfield,
+ )
+ fill_aggregate_attestation(state, attestation)
+ if signed:
+ sign_attestation(state, attestation)
+ return attestation
+
+
+def sign_aggregate_attestation(state: BeaconState, data: AttestationData, participants: List[int]):
+ signatures = []
+ for validator_index in participants:
+ privkey = privkeys[validator_index]
+ signatures.append(
+ get_attestation_signature(
+ state,
+ data,
+ privkey
+ )
+ )
+
+ return bls_aggregate_signatures(signatures)
+
+
+def sign_indexed_attestation(state, indexed_attestation: IndexedAttestation):
+ participants = indexed_attestation.custody_bit_0_indices + indexed_attestation.custody_bit_1_indices
+ indexed_attestation.signature = sign_aggregate_attestation(state, indexed_attestation.data, participants)
+
+
+def sign_attestation(state, attestation: Attestation):
+ participants = get_attesting_indices(
+ state,
+ attestation.data,
+ attestation.aggregation_bitfield,
+ )
+
+ attestation.signature = sign_aggregate_attestation(state, attestation.data, participants)
+
+
+def get_attestation_signature(state, attestation_data, privkey, custody_bit=0b0):
+ message_hash = AttestationDataAndCustodyBit(
+ data=attestation_data,
+ custody_bit=custody_bit,
+ ).hash_tree_root()
+
+ return bls_sign(
+ message_hash=message_hash,
+ privkey=privkey,
+ domain=get_domain(
+ state=state,
+ domain_type=spec.DOMAIN_ATTESTATION,
+ message_epoch=attestation_data.target_epoch,
+ )
+ )
+
+
+def fill_aggregate_attestation(state, attestation):
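+ # mark every member of the crosslink committee as a participant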
+ crosslink_committee = get_crosslink_committee(
+ state,
+ attestation.data.target_epoch,
+ attestation.data.crosslink.shard,
+ )
+ for i in range(len(crosslink_committee)):
+ attestation.aggregation_bitfield = set_bitfield_bit(attestation.aggregation_bitfield, i)
+
+
+def add_attestation_to_state(state, attestation, slot):
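+ # include the attestation in an otherwise empty block at the given slot and run the state transition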
+ block = build_empty_block_for_next_slot(state)
+ block.slot = slot
+ block.body.attestations.append(attestation)
+ process_slots(state, block.slot)
+ sign_block(state, block)
+ state_transition(state, block)
diff --git a/test_libs/pyspec/eth2spec/test/helpers/attester_slashings.py b/test_libs/pyspec/eth2spec/test/helpers/attester_slashings.py
new file mode 100644
index 0000000000..d19b41dfec
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/helpers/attester_slashings.py
@@ -0,0 +1,19 @@
+from copy import deepcopy
+
+from eth2spec.phase0.spec import AttesterSlashing, convert_to_indexed
+from eth2spec.test.helpers.attestations import get_valid_attestation, sign_attestation
+
+
+def get_valid_attester_slashing(state, signed_1=False, signed_2=False):
+ attestation_1 = get_valid_attestation(state, signed=signed_1)
+
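+ # build a second, conflicting attestation by changing the target root (a double vote)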
+ attestation_2 = deepcopy(attestation_1)
+ attestation_2.data.target_root = b'\x01' * 32
+
+ if signed_2:
+ sign_attestation(state, attestation_2)
+
+ return AttesterSlashing(
+ attestation_1=convert_to_indexed(state, attestation_1),
+ attestation_2=convert_to_indexed(state, attestation_2),
+ )
diff --git a/test_libs/pyspec/eth2spec/test/helpers/bitfields.py b/test_libs/pyspec/eth2spec/test/helpers/bitfields.py
new file mode 100644
index 0000000000..7c25d073ab
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/helpers/bitfields.py
@@ -0,0 +1,11 @@
+def set_bitfield_bit(bitfield, i):
+ """
+ Set the bit in ``bitfield`` at position ``i`` to ``1``.
+ """
+ byte_index = i // 8
+ bit_index = i % 8
+ return (
+ bitfield[:byte_index] +
+ bytes([bitfield[byte_index] | (1 << bit_index)]) +
+ bitfield[byte_index + 1:]
+ )
diff --git a/test_libs/pyspec/eth2spec/test/helpers/block.py b/test_libs/pyspec/eth2spec/test/helpers/block.py
new file mode 100644
index 0000000000..c557e02452
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/helpers/block.py
@@ -0,0 +1,80 @@
+from copy import deepcopy
+
+from eth2spec.phase0 import spec
+from eth2spec.phase0.spec import (
+ BeaconBlock,
+ get_beacon_proposer_index, slot_to_epoch, get_domain,
+ process_slots, state_transition,
+)
+from eth2spec.test.helpers.keys import privkeys
+from eth2spec.utils.bls import bls_sign, only_with_bls
+from eth2spec.utils.minimal_ssz import signing_root, hash_tree_root
+
+
+# Fully ignore the function if BLS is off, beacon-proposer index calculation is slow.
+@only_with_bls()
+def sign_block(state, block, proposer_index=None):
+ assert state.slot <= block.slot
+
+ if proposer_index is None:
+ if block.slot == state.slot:
+ proposer_index = get_beacon_proposer_index(state)
+ else:
+ if slot_to_epoch(state.slot) + 1 > slot_to_epoch(block.slot):
+ print("warning: block slot far away, and no proposer index manually given."
+ " Signing block is slow due to transition for proposer index calculation.")
+ # use stub state to get proposer index of future slot
+ stub_state = deepcopy(state)
+ process_slots(stub_state, block.slot)
+ proposer_index = get_beacon_proposer_index(stub_state)
+
+ privkey = privkeys[proposer_index]
+
+ block.body.randao_reveal = bls_sign(
+ privkey=privkey,
+ message_hash=hash_tree_root(slot_to_epoch(block.slot)),
+ domain=get_domain(
+ state,
+ message_epoch=slot_to_epoch(block.slot),
+ domain_type=spec.DOMAIN_RANDAO,
+ )
+ )
+ block.signature = bls_sign(
+ message_hash=signing_root(block),
+ privkey=privkey,
+ domain=get_domain(
+ state,
+ spec.DOMAIN_BEACON_PROPOSER,
+ slot_to_epoch(block.slot)))
+
+
+def apply_empty_block(state):
+ """
+ Transition via an empty block (on current slot, assuming no block has been applied yet).
+ :return: the empty block that triggered the transition.
+ """
+ block = build_empty_block(state, signed=True)
+ state_transition(state, block)
+ return block
+
+
+def build_empty_block(state, slot=None, signed=False):
+ if slot is None:
+ slot = state.slot
+ empty_block = BeaconBlock()
+ empty_block.slot = slot
+ empty_block.body.eth1_data.deposit_count = state.deposit_index
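+ # derive the parent root from the latest block header, filling in its state root if it is still zeroed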
+ previous_block_header = deepcopy(state.latest_block_header)
+ if previous_block_header.state_root == spec.ZERO_HASH:
+ previous_block_header.state_root = state.hash_tree_root()
+ empty_block.parent_root = signing_root(previous_block_header)
+
+ if signed:
+ sign_block(state, empty_block)
+
+ return empty_block
+
+
+def build_empty_block_for_next_slot(state, signed=False):
+ return build_empty_block(state, state.slot + 1, signed=signed)
+
diff --git a/test_libs/pyspec/eth2spec/test/helpers/block_header.py b/test_libs/pyspec/eth2spec/test/helpers/block_header.py
new file mode 100644
index 0000000000..9aba62d37d
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/helpers/block_header.py
@@ -0,0 +1,18 @@
+# Access constants from spec pkg reference.
+import eth2spec.phase0.spec as spec
+
+from eth2spec.phase0.spec import get_domain
+from eth2spec.utils.bls import bls_sign
+from eth2spec.utils.minimal_ssz import signing_root
+
+
+def sign_block_header(state, header, privkey):
+ domain = get_domain(
+ state=state,
+ domain_type=spec.DOMAIN_BEACON_PROPOSER,
+ )
+ header.signature = bls_sign(
+ message_hash=signing_root(header),
+ privkey=privkey,
+ domain=domain,
+ )
diff --git a/test_libs/pyspec/eth2spec/test/helpers/deposits.py b/test_libs/pyspec/eth2spec/test/helpers/deposits.py
new file mode 100644
index 0000000000..c5deb124e6
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/helpers/deposits.py
@@ -0,0 +1,81 @@
+# Access constants from spec pkg reference.
+import eth2spec.phase0.spec as spec
+
+from eth2spec.phase0.spec import get_domain, DepositData, verify_merkle_branch, Deposit, ZERO_HASH
+from eth2spec.test.helpers.keys import pubkeys, privkeys
+from eth2spec.utils.bls import bls_sign
+from eth2spec.utils.merkle_minimal import calc_merkle_tree_from_leaves, get_merkle_root, get_merkle_proof
+from eth2spec.utils.minimal_ssz import signing_root
+
+
+def build_deposit_data(state, pubkey, privkey, amount, signed=False):
+ deposit_data = DepositData(
+ pubkey=pubkey,
+ # insecurely use pubkey as withdrawal key as well
+ withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + spec.hash(pubkey)[1:],
+ amount=amount,
+ )
+ if signed:
+ sign_deposit_data(state, deposit_data, privkey)
+ return deposit_data
+
+
+def sign_deposit_data(state, deposit_data, privkey):
+ signature = bls_sign(
+ message_hash=signing_root(deposit_data),
+ privkey=privkey,
+ domain=get_domain(
+ state,
+ spec.DOMAIN_DEPOSIT,
+ )
+ )
+ deposit_data.signature = signature
+
+
+def build_deposit(state,
+ deposit_data_leaves,
+ pubkey,
+ privkey,
+ amount,
+ signed):
+ deposit_data = build_deposit_data(state, pubkey, privkey, amount, signed)
+
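+ # append the new deposit as a leaf and rebuild the Merkle tree to obtain the branch proof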
+ item = deposit_data.hash_tree_root()
+ index = len(deposit_data_leaves)
+ deposit_data_leaves.append(item)
+ tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves))
+ root = get_merkle_root((tuple(deposit_data_leaves)))
+ proof = list(get_merkle_proof(tree, item_index=index))
+ assert verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, index, root)
+
+ deposit = Deposit(
+ proof=list(proof),
+ index=index,
+ data=deposit_data,
+ )
+
+ return deposit, root, deposit_data_leaves
+
+
+def prepare_state_and_deposit(state, validator_index, amount, signed=False):
+ """
+ Prepare the state for the deposit, and create a deposit for the given validator, depositing the given amount.
+ """
+ pre_validator_count = len(state.validator_registry)
+ # fill previous deposits with zero-hash
+ deposit_data_leaves = [ZERO_HASH] * pre_validator_count
+
+ pubkey = pubkeys[validator_index]
+ privkey = privkeys[validator_index]
+ deposit, root, deposit_data_leaves = build_deposit(
+ state,
+ deposit_data_leaves,
+ pubkey,
+ privkey,
+ amount,
+ signed
+ )
+
+ state.latest_eth1_data.deposit_root = root
+ state.latest_eth1_data.deposit_count = len(deposit_data_leaves)
+ return deposit
diff --git a/test_libs/pyspec/eth2spec/test/helpers/genesis.py b/test_libs/pyspec/eth2spec/test/helpers/genesis.py
new file mode 100644
index 0000000000..01011cacd0
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/helpers/genesis.py
@@ -0,0 +1,51 @@
+# Access constants from spec pkg reference.
+import eth2spec.phase0.spec as spec
+
+from eth2spec.phase0.spec import Eth1Data, ZERO_HASH, get_active_validator_indices
+from eth2spec.test.helpers.keys import pubkeys
+from eth2spec.utils.minimal_ssz import hash_tree_root
+
+
+def build_mock_validator(i: int, balance: int):
+ pubkey = pubkeys[i]
+ # insecurely use pubkey as withdrawal key as well
+ withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX_BYTE + spec.hash(pubkey)[1:]
+ return spec.Validator(
+ pubkey=pubkeys[i],
+ withdrawal_credentials=withdrawal_credentials,
+ activation_eligibility_epoch=spec.FAR_FUTURE_EPOCH,
+ activation_epoch=spec.FAR_FUTURE_EPOCH,
+ exit_epoch=spec.FAR_FUTURE_EPOCH,
+ withdrawable_epoch=spec.FAR_FUTURE_EPOCH,
+ effective_balance=min(balance - balance % spec.EFFECTIVE_BALANCE_INCREMENT, spec.MAX_EFFECTIVE_BALANCE)
+ )
+
+
+def create_genesis_state(num_validators):
+ deposit_root = b'\x42' * 32
+
+ state = spec.BeaconState(
+ genesis_time=0,
+ deposit_index=num_validators,
+ latest_eth1_data=Eth1Data(
+ deposit_root=deposit_root,
+ deposit_count=num_validators,
+ block_hash=ZERO_HASH,
+ ))
+
+ # We "hack" in the initial validators,
+ # as it is much faster than creating and processing genesis deposits for every single test case.
+ state.balances = [spec.MAX_EFFECTIVE_BALANCE] * num_validators
+ state.validator_registry = [build_mock_validator(i, state.balances[i]) for i in range(num_validators)]
+
+ # Process genesis activations
+ for validator in state.validator_registry:
+ if validator.effective_balance >= spec.MAX_EFFECTIVE_BALANCE:
+ validator.activation_eligibility_epoch = spec.GENESIS_EPOCH
+ validator.activation_epoch = spec.GENESIS_EPOCH
+
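+ # fill the entire active-index-roots history with the genesis value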
+ genesis_active_index_root = hash_tree_root(get_active_validator_indices(state, spec.GENESIS_EPOCH))
+ for index in range(spec.LATEST_ACTIVE_INDEX_ROOTS_LENGTH):
+ state.latest_active_index_roots[index] = genesis_active_index_root
+
+ return state
diff --git a/test_libs/pyspec/eth2spec/test/helpers/keys.py b/test_libs/pyspec/eth2spec/test/helpers/keys.py
new file mode 100644
index 0000000000..f47cd7c10b
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/helpers/keys.py
@@ -0,0 +1,6 @@
+from py_ecc import bls
+from eth2spec.phase0 import spec
+
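+# Deterministic, insecure test keys: validator i uses private key i + 1.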
+privkeys = [i + 1 for i in range(spec.SLOTS_PER_EPOCH * 16)]
+pubkeys = [bls.privtopub(privkey) for privkey in privkeys]
+pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys, pubkeys)}
diff --git a/test_libs/pyspec/eth2spec/test/helpers/proposer_slashings.py b/test_libs/pyspec/eth2spec/test/helpers/proposer_slashings.py
new file mode 100644
index 0000000000..02629f7da0
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/helpers/proposer_slashings.py
@@ -0,0 +1,35 @@
+from copy import deepcopy
+
+from eth2spec.phase0.spec import (
+ get_current_epoch, get_active_validator_indices, BeaconBlockHeader, ProposerSlashing
+)
+from eth2spec.test.helpers.block_header import sign_block_header
+from eth2spec.test.helpers.keys import pubkey_to_privkey
+
+
+def get_valid_proposer_slashing(state, signed_1=False, signed_2=False):
+ current_epoch = get_current_epoch(state)
+ validator_index = get_active_validator_indices(state, current_epoch)[-1]
+ privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
+ slot = state.slot
+
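+ # build two conflicting block headers for the same proposer; signing both is slashable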
+ header_1 = BeaconBlockHeader(
+ slot=slot,
+ parent_root=b'\x33' * 32,
+ state_root=b'\x44' * 32,
+ block_body_root=b'\x55' * 32,
+ )
+ header_2 = deepcopy(header_1)
+ header_2.parent_root = b'\x99' * 32
+ header_2.slot = slot + 1
+
+ if signed_1:
+ sign_block_header(state, header_1, privkey)
+ if signed_2:
+ sign_block_header(state, header_2, privkey)
+
+ return ProposerSlashing(
+ proposer_index=validator_index,
+ header_1=header_1,
+ header_2=header_2,
+ )
diff --git a/test_libs/pyspec/eth2spec/test/helpers/state.py b/test_libs/pyspec/eth2spec/test/helpers/state.py
new file mode 100644
index 0000000000..1137561f1e
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/helpers/state.py
@@ -0,0 +1,31 @@
+# Access constants from spec pkg reference.
+import eth2spec.phase0.spec as spec
+
+from eth2spec.phase0.spec import process_slots
+
+
+def get_balance(state, index):
+ return state.balances[index]
+
+
+def next_slot(state):
+ """
+ Transition to the next slot.
+ """
+ process_slots(state, state.slot + 1)
+
+
+def next_epoch(state):
+ """
+ Transition to the start slot of the next epoch
+ """
+ slot = state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH)
+ process_slots(state, slot)
+
+
+def get_state_root(state, slot) -> bytes:
+ """
+ Return the state root at a recent ``slot``.
+ """
+ assert slot < state.slot <= slot + spec.SLOTS_PER_HISTORICAL_ROOT
+ return state.latest_state_roots[slot % spec.SLOTS_PER_HISTORICAL_ROOT]
diff --git a/test_libs/pyspec/eth2spec/test/helpers/transfers.py b/test_libs/pyspec/eth2spec/test/helpers/transfers.py
new file mode 100644
index 0000000000..2045f48ad6
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/helpers/transfers.py
@@ -0,0 +1,55 @@
+# Access constants from spec pkg reference.
+import eth2spec.phase0.spec as spec
+
+from eth2spec.phase0.spec import get_current_epoch, get_active_validator_indices, Transfer, get_domain
+from eth2spec.test.helpers.keys import pubkeys, privkeys
+from eth2spec.test.helpers.state import get_balance
+from eth2spec.utils.bls import bls_sign
+from eth2spec.utils.minimal_ssz import signing_root
+
+
+def get_valid_transfer(state, slot=None, sender_index=None, amount=None, fee=None, signed=False):
+ if slot is None:
+ slot = state.slot
+ current_epoch = get_current_epoch(state)
+ if sender_index is None:
+ sender_index = get_active_validator_indices(state, current_epoch)[-1]
+ recipient_index = get_active_validator_indices(state, current_epoch)[0]
+ transfer_pubkey = pubkeys[-1]
+ transfer_privkey = privkeys[-1]
+
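+ # default to a fee of 1/32 of the sender's balance and transfer the remainder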
+ if fee is None:
+ fee = get_balance(state, sender_index) // 32
+ if amount is None:
+ amount = get_balance(state, sender_index) - fee
+
+ transfer = Transfer(
+ sender=sender_index,
+ recipient=recipient_index,
+ amount=amount,
+ fee=fee,
+ slot=slot,
+ pubkey=transfer_pubkey,
+ )
+ if signed:
+ sign_transfer(state, transfer, transfer_privkey)
+
+ # ensure the sender's withdrawal_credentials are derived from the transfer pubkey
+ state.validator_registry[transfer.sender].withdrawal_credentials = (
+ spec.BLS_WITHDRAWAL_PREFIX_BYTE + spec.hash(transfer.pubkey)[1:]
+ )
+
+ return transfer
+
+
+def sign_transfer(state, transfer, privkey):
+ transfer.signature = bls_sign(
+ message_hash=signing_root(transfer),
+ privkey=privkey,
+ domain=get_domain(
+ state=state,
+ domain_type=spec.DOMAIN_TRANSFER,
+ message_epoch=get_current_epoch(state),
+ )
+ )
+ return transfer
diff --git a/test_libs/pyspec/eth2spec/test/helpers/voluntary_exits.py b/test_libs/pyspec/eth2spec/test/helpers/voluntary_exits.py
new file mode 100644
index 0000000000..54376d694b
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/helpers/voluntary_exits.py
@@ -0,0 +1,28 @@
+# Access constants from spec pkg reference.
+import eth2spec.phase0.spec as spec
+
+from eth2spec.phase0.spec import VoluntaryExit, get_domain
+from eth2spec.utils.bls import bls_sign
+from eth2spec.utils.minimal_ssz import signing_root
+
+
+def build_voluntary_exit(state, epoch, validator_index, privkey, signed=False):
+ voluntary_exit = VoluntaryExit(
+ epoch=epoch,
+ validator_index=validator_index,
+ )
+ if signed:
+ sign_voluntary_exit(state, voluntary_exit, privkey)
+ return voluntary_exit
+
+
+def sign_voluntary_exit(state, voluntary_exit, privkey):
+ voluntary_exit.signature = bls_sign(
+ message_hash=signing_root(voluntary_exit),
+ privkey=privkey,
+ domain=get_domain(
+ state=state,
+ domain_type=spec.DOMAIN_VOLUNTARY_EXIT,
+ message_epoch=voluntary_exit.epoch,
+ )
+ )
diff --git a/test_libs/pyspec/eth2spec/test/sanity/__init__.py b/test_libs/pyspec/eth2spec/test/sanity/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/test_libs/pyspec/eth2spec/test/sanity/test_blocks.py b/test_libs/pyspec/eth2spec/test/sanity/test_blocks.py
new file mode 100644
index 0000000000..654a41d62f
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/sanity/test_blocks.py
@@ -0,0 +1,406 @@
+from copy import deepcopy
+
+import eth2spec.phase0.spec as spec
+from eth2spec.utils.bls import bls_sign
+
+from eth2spec.utils.minimal_ssz import signing_root
+from eth2spec.phase0.spec import (
+ # SSZ
+ VoluntaryExit,
+ # functions
+ get_active_validator_indices,
+ get_beacon_proposer_index,
+ get_block_root_at_slot,
+ get_current_epoch,
+ get_domain,
+ state_transition,
+)
+from eth2spec.test.helpers.state import get_balance
+from eth2spec.test.helpers.transfers import get_valid_transfer
+from eth2spec.test.helpers.block import build_empty_block_for_next_slot, sign_block
+from eth2spec.test.helpers.keys import privkeys, pubkeys
+from eth2spec.test.helpers.attester_slashings import get_valid_attester_slashing
+from eth2spec.test.helpers.proposer_slashings import get_valid_proposer_slashing
+from eth2spec.test.helpers.attestations import get_valid_attestation
+from eth2spec.test.helpers.deposits import prepare_state_and_deposit
+
+from eth2spec.test.context import spec_state_test, never_bls
+
+
+@never_bls
+@spec_state_test
+def test_empty_block_transition(state):
+ pre_slot = state.slot
+ pre_eth1_votes = len(state.eth1_data_votes)
+
+ yield 'pre', state
+
+ block = build_empty_block_for_next_slot(state, signed=True)
+ yield 'blocks', [block], [spec.BeaconBlock]
+
+ state_transition(state, block)
+ yield 'post', state
+
+ assert len(state.eth1_data_votes) == pre_eth1_votes + 1
+ assert get_block_root_at_slot(state, pre_slot) == block.parent_root
+
+
+@never_bls
+@spec_state_test
+def test_skipped_slots(state):
+ pre_slot = state.slot
+ yield 'pre', state
+
+ block = build_empty_block_for_next_slot(state)
+ block.slot += 3
+ sign_block(state, block)
+ yield 'blocks', [block], [spec.BeaconBlock]
+
+ state_transition(state, block)
+ yield 'post', state
+
+ assert state.slot == block.slot
+ for slot in range(pre_slot, state.slot):
+ assert get_block_root_at_slot(state, slot) == block.parent_root
+
+
+@spec_state_test
+def test_empty_epoch_transition(state):
+ pre_slot = state.slot
+ yield 'pre', state
+
+ block = build_empty_block_for_next_slot(state)
+ block.slot += spec.SLOTS_PER_EPOCH
+ sign_block(state, block)
+ yield 'blocks', [block], [spec.BeaconBlock]
+
+ state_transition(state, block)
+ yield 'post', state
+
+ assert state.slot == block.slot
+ for slot in range(pre_slot, state.slot):
+ assert get_block_root_at_slot(state, slot) == block.parent_root
+
+
+# @spec_state_test
+# def test_empty_epoch_transition_not_finalizing(state):
+# # copy for later balance lookups.
+# pre_state = deepcopy(state)
+# yield 'pre', state
+#
+# block = build_empty_block_for_next_slot(state)
+# block.slot += spec.SLOTS_PER_EPOCH * 5
+# sign_block(state, block, proposer_index=0)
+# yield 'blocks', [block], [spec.BeaconBlock]
+#
+# state_transition(state, block)
+# yield 'post', state
+#
+# assert state.slot == block.slot
+# assert state.finalized_epoch < get_current_epoch(state) - 4
+# for index in range(len(state.validator_registry)):
+# assert get_balance(state, index) < get_balance(pre_state, index)
+
+
+@spec_state_test
+def test_proposer_slashing(state):
+ # copy for later balance lookups.
+ pre_state = deepcopy(state)
+ proposer_slashing = get_valid_proposer_slashing(state, signed_1=True, signed_2=True)
+ validator_index = proposer_slashing.proposer_index
+
+ assert not state.validator_registry[validator_index].slashed
+
+ yield 'pre', state
+
+ #
+ # Add to state via block transition
+ #
+ block = build_empty_block_for_next_slot(state)
+ block.body.proposer_slashings.append(proposer_slashing)
+ sign_block(state, block)
+ yield 'blocks', [block], [spec.BeaconBlock]
+
+ state_transition(state, block)
+ yield 'post', state
+
+ # check if slashed
+ slashed_validator = state.validator_registry[validator_index]
+ assert slashed_validator.slashed
+ assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
+ assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
+ # lost whistleblower reward
+ assert get_balance(state, validator_index) < get_balance(pre_state, validator_index)
+
+
+@spec_state_test
+def test_attester_slashing(state):
+ # copy for later balance lookups.
+ pre_state = deepcopy(state)
+
+ attester_slashing = get_valid_attester_slashing(state, signed_1=True, signed_2=True)
+ validator_index = (attester_slashing.attestation_1.custody_bit_0_indices +
+ attester_slashing.attestation_1.custody_bit_1_indices)[0]
+
+ assert not state.validator_registry[validator_index].slashed
+
+ yield 'pre', state
+
+ #
+ # Add to state via block transition
+ #
+ block = build_empty_block_for_next_slot(state)
+ block.body.attester_slashings.append(attester_slashing)
+ sign_block(state, block)
+ yield 'blocks', [block], [spec.BeaconBlock]
+
+ state_transition(state, block)
+ yield 'post', state
+
+ slashed_validator = state.validator_registry[validator_index]
+ assert slashed_validator.slashed
+ assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
+ assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
+ # lost whistleblower reward
+ assert get_balance(state, validator_index) < get_balance(pre_state, validator_index)
+
+ proposer_index = get_beacon_proposer_index(state)
+ # gained whistleblower reward
+ assert (
+ get_balance(state, proposer_index) >
+ get_balance(pre_state, proposer_index)
+ )
+
+
+# TODO update functions below to be like above, i.e. with @spec_state_test and yielding data to put into the test vector
+
+@spec_state_test
+def test_deposit_in_block(state):
+ initial_registry_len = len(state.validator_registry)
+ initial_balances_len = len(state.balances)
+
+ validator_index = len(state.validator_registry)
+ amount = spec.MAX_EFFECTIVE_BALANCE
+ deposit = prepare_state_and_deposit(state, validator_index, amount, signed=True)
+
+ yield 'pre', state
+
+ block = build_empty_block_for_next_slot(state)
+ block.body.deposits.append(deposit)
+ sign_block(state, block)
+
+ yield 'blocks', [block], [spec.BeaconBlock]
+
+ state_transition(state, block)
+ yield 'post', state
+
+ assert len(state.validator_registry) == initial_registry_len + 1
+ assert len(state.balances) == initial_balances_len + 1
+ assert get_balance(state, validator_index) == spec.MAX_EFFECTIVE_BALANCE
+ assert state.validator_registry[validator_index].pubkey == pubkeys[validator_index]
+
+
+@spec_state_test
+def test_deposit_top_up(state):
+ validator_index = 0
+ amount = spec.MAX_EFFECTIVE_BALANCE // 4
+ deposit = prepare_state_and_deposit(state, validator_index, amount)
+
+ initial_registry_len = len(state.validator_registry)
+ initial_balances_len = len(state.balances)
+ validator_pre_balance = get_balance(state, validator_index)
+
+ yield 'pre', state
+
+ block = build_empty_block_for_next_slot(state)
+ block.body.deposits.append(deposit)
+ sign_block(state, block)
+
+ yield 'blocks', [block], [spec.BeaconBlock]
+
+ state_transition(state, block)
+ yield 'post', state
+
+ assert len(state.validator_registry) == initial_registry_len
+ assert len(state.balances) == initial_balances_len
+ assert get_balance(state, validator_index) == validator_pre_balance + amount
+
+
+@spec_state_test
+def test_attestation(state):
+ state.slot = spec.SLOTS_PER_EPOCH
+
+ yield 'pre', state
+
+ attestation = get_valid_attestation(state, signed=True)
+
+ # Add to state via block transition
+ pre_current_attestations_len = len(state.current_epoch_attestations)
+ attestation_block = build_empty_block_for_next_slot(state)
+ attestation_block.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
+ attestation_block.body.attestations.append(attestation)
+ sign_block(state, attestation_block)
+ state_transition(state, attestation_block)
+
+ assert len(state.current_epoch_attestations) == pre_current_attestations_len + 1
+
+ # Epoch transition should move to previous_epoch_attestations
+ pre_current_attestations_root = spec.hash_tree_root(state.current_epoch_attestations)
+
+ epoch_block = build_empty_block_for_next_slot(state)
+ epoch_block.slot += spec.SLOTS_PER_EPOCH
+ sign_block(state, epoch_block)
+ state_transition(state, epoch_block)
+
+ yield 'blocks', [attestation_block, epoch_block], [spec.BeaconBlock]
+ yield 'post', state
+
+ assert len(state.current_epoch_attestations) == 0
+ assert spec.hash_tree_root(state.previous_epoch_attestations) == pre_current_attestations_root
+
+
+@spec_state_test
+def test_voluntary_exit(state):
+ validator_index = get_active_validator_indices(
+ state,
+ get_current_epoch(state)
+ )[-1]
+
+ # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit
+ state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
+
+ yield 'pre', state
+
+ voluntary_exit = VoluntaryExit(
+ epoch=get_current_epoch(state),
+ validator_index=validator_index,
+ )
+ voluntary_exit.signature = bls_sign(
+ message_hash=signing_root(voluntary_exit),
+ privkey=privkeys[validator_index],
+ domain=get_domain(
+ state=state,
+ domain_type=spec.DOMAIN_VOLUNTARY_EXIT,
+ )
+ )
+
+ # Add to state via block transition
+ initiate_exit_block = build_empty_block_for_next_slot(state)
+ initiate_exit_block.body.voluntary_exits.append(voluntary_exit)
+ sign_block(state, initiate_exit_block)
+ state_transition(state, initiate_exit_block)
+
+ assert state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
+
+ # Process within epoch transition
+ exit_block = build_empty_block_for_next_slot(state)
+ exit_block.slot += spec.SLOTS_PER_EPOCH
+ sign_block(state, exit_block)
+ state_transition(state, exit_block)
+
+ yield 'blocks', [initiate_exit_block, exit_block], [spec.BeaconBlock]
+ yield 'post', state
+
+ assert state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
+
+
+@spec_state_test
+def test_transfer(state):
+ # overwrite the default MAX_TRANSFERS of 0 to allow the transfer in this test
+ spec.MAX_TRANSFERS = 1
+
+ sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1]
+ amount = get_balance(state, sender_index)
+
+ transfer = get_valid_transfer(state, state.slot + 1, sender_index, amount, signed=True)
+ recipient_index = transfer.recipient
+ pre_transfer_recipient_balance = get_balance(state, recipient_index)
+
+ # un-activate so validator can transfer
+ state.validator_registry[sender_index].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
+
+ yield 'pre', state
+
+ # Add to state via block transition
+ block = build_empty_block_for_next_slot(state)
+ block.body.transfers.append(transfer)
+ sign_block(state, block)
+
+ yield 'blocks', [block], [spec.BeaconBlock]
+
+ state_transition(state, block)
+ yield 'post', state
+
+ sender_balance = get_balance(state, sender_index)
+ recipient_balance = get_balance(state, recipient_index)
+ assert sender_balance == 0
+ assert recipient_balance == pre_transfer_recipient_balance + amount
+
+
+@spec_state_test
+def test_balance_driven_status_transitions(state):
+ current_epoch = get_current_epoch(state)
+ validator_index = get_active_validator_indices(state, current_epoch)[-1]
+
+ assert state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH
+
+ # set validator balance to below ejection threshold
+ state.validator_registry[validator_index].effective_balance = spec.EJECTION_BALANCE
+
+ yield 'pre', state
+
+ # trigger epoch transition
+ block = build_empty_block_for_next_slot(state)
+ block.slot += spec.SLOTS_PER_EPOCH
+ sign_block(state, block)
+ state_transition(state, block)
+
+ yield 'blocks', [block], [spec.BeaconBlock]
+ yield 'post', state
+
+ assert state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
+
+
+@spec_state_test
+def test_historical_batch(state):
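+ # advance to the last slot before a SLOTS_PER_HISTORICAL_ROOT boundary so the next block triggers the batch update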
+ state.slot += spec.SLOTS_PER_HISTORICAL_ROOT - (state.slot % spec.SLOTS_PER_HISTORICAL_ROOT) - 1
+ pre_historical_roots_len = len(state.historical_roots)
+
+ yield 'pre', state
+
+ block = build_empty_block_for_next_slot(state, signed=True)
+ state_transition(state, block)
+
+ yield 'blocks', [block], [spec.BeaconBlock]
+ yield 'post', state
+
+ assert state.slot == block.slot
+ assert get_current_epoch(state) % (spec.SLOTS_PER_HISTORICAL_ROOT // spec.SLOTS_PER_EPOCH) == 0
+ assert len(state.historical_roots) == pre_historical_roots_len + 1
+
+
+# @spec_state_test
+# def test_eth1_data_votes(state):
+# yield 'pre', state
+#
+# expected_votes = 0
+# assert len(state.eth1_data_votes) == expected_votes
+#
+# blocks = []
+# for _ in range(spec.SLOTS_PER_ETH1_VOTING_PERIOD - 1):
+# block = build_empty_block_for_next_slot(state)
+# state_transition(state, block)
+# expected_votes += 1
+# assert len(state.eth1_data_votes) == expected_votes
+# blocks.append(block)
+#
+# block = build_empty_block_for_next_slot(state)
+# blocks.append(block)
+#
+# state_transition(state, block)
+#
+# yield 'blocks', [block], [spec.BeaconBlock]
+# yield 'post', state
+#
+# assert state.slot % spec.SLOTS_PER_ETH1_VOTING_PERIOD == 0
+# assert len(state.eth1_data_votes) == 1
diff --git a/test_libs/pyspec/eth2spec/test/sanity/test_slots.py b/test_libs/pyspec/eth2spec/test/sanity/test_slots.py
new file mode 100644
index 0000000000..92e0251ca5
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/sanity/test_slots.py
@@ -0,0 +1,58 @@
+import eth2spec.phase0.spec as spec
+
+from eth2spec.phase0.spec import process_slots
+from eth2spec.test.helpers.state import get_state_root
+from eth2spec.test.context import spec_state_test
+
+
+@spec_state_test
+def test_slots_1(state):
+ pre_slot = state.slot
+ pre_root = state.hash_tree_root()
+ yield 'pre', state
+
+ slots = 1
+ yield 'slots', slots
+ process_slots(state, state.slot + slots)
+
+ yield 'post', state
+ assert state.slot == pre_slot + 1
+ assert get_state_root(state, pre_slot) == pre_root
+
+
+@spec_state_test
+def test_slots_2(state):
+ yield 'pre', state
+ slots = 2
+ yield 'slots', slots
+ process_slots(state, state.slot + slots)
+ yield 'post', state
+
+
+@spec_state_test
+def test_empty_epoch(state):
+ yield 'pre', state
+ slots = spec.SLOTS_PER_EPOCH
+ yield 'slots', slots
+ process_slots(state, state.slot + slots)
+ yield 'post', state
+
+
+@spec_state_test
+def test_double_empty_epoch(state):
+ yield 'pre', state
+ slots = spec.SLOTS_PER_EPOCH * 2
+ yield 'slots', slots
+ process_slots(state, state.slot + slots)
+ yield 'post', state
+
+
+@spec_state_test
+def test_over_epoch_boundary(state):
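+ # start halfway through an epoch so that processing a full epoch of slots crosses an epoch boundary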
+ process_slots(state, state.slot + (spec.SLOTS_PER_EPOCH // 2))
+ yield 'pre', state
+ slots = spec.SLOTS_PER_EPOCH
+ yield 'slots', slots
+ process_slots(state, state.slot + slots)
+ yield 'post', state
+
diff --git a/test_libs/pyspec/tests/test_finality.py b/test_libs/pyspec/eth2spec/test/test_finality.py
similarity index 55%
rename from test_libs/pyspec/tests/test_finality.py
rename to test_libs/pyspec/eth2spec/test/test_finality.py
index 816dfd6bd2..cdd09bf239 100644
--- a/test_libs/pyspec/tests/test_finality.py
+++ b/test_libs/pyspec/eth2spec/test/test_finality.py
@@ -1,20 +1,14 @@
from copy import deepcopy
-import pytest
-
import eth2spec.phase0.spec as spec
-
-from .helpers import (
- build_empty_block_for_next_slot,
- fill_aggregate_attestation,
+from eth2spec.phase0.spec import (
get_current_epoch,
get_epoch_start_slot,
- get_valid_attestation,
- next_epoch,
)
-
-# mark entire file as 'state'
-pytestmark = pytest.mark.state
+from .context import spec_state_test, never_bls
+from .helpers.state import next_epoch
+from .helpers.block import build_empty_block_for_next_slot, apply_empty_block
+from .helpers.attestations import get_valid_attestation
def check_finality(state,
@@ -55,13 +49,11 @@ def next_epoch_with_attestations(state,
slot_to_attest = post_state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1
if slot_to_attest >= get_epoch_start_slot(get_current_epoch(post_state)):
cur_attestation = get_valid_attestation(post_state, slot_to_attest)
- fill_aggregate_attestation(post_state, cur_attestation)
block.body.attestations.append(cur_attestation)
if fill_prev_epoch:
slot_to_attest = post_state.slot - spec.SLOTS_PER_EPOCH + 1
prev_attestation = get_valid_attestation(post_state, slot_to_attest)
- fill_aggregate_attestation(post_state, prev_attestation)
block.body.attestations.append(prev_attestation)
spec.state_transition(post_state, block)
@@ -70,126 +62,140 @@ def next_epoch_with_attestations(state,
return state, blocks, post_state
+@never_bls
+@spec_state_test
def test_finality_rule_4(state):
- test_state = deepcopy(state)
+ yield 'pre', state
blocks = []
for epoch in range(4):
- prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, True, False)
+ prev_state, new_blocks, state = next_epoch_with_attestations(state, True, False)
blocks += new_blocks
# justification/finalization skipped at GENESIS_EPOCH
if epoch == 0:
- check_finality(test_state, prev_state, False, False, False)
+ check_finality(state, prev_state, False, False, False)
# justification/finalization skipped at GENESIS_EPOCH + 1
elif epoch == 1:
- check_finality(test_state, prev_state, False, False, False)
+ check_finality(state, prev_state, False, False, False)
elif epoch == 2:
- check_finality(test_state, prev_state, True, False, False)
+ check_finality(state, prev_state, True, False, False)
elif epoch >= 3:
# rule 4 of finality
- check_finality(test_state, prev_state, True, True, True)
- assert test_state.finalized_epoch == prev_state.current_justified_epoch
- assert test_state.finalized_root == prev_state.current_justified_root
+ check_finality(state, prev_state, True, True, True)
+ assert state.finalized_epoch == prev_state.current_justified_epoch
+ assert state.finalized_root == prev_state.current_justified_root
- return state, blocks, test_state
+ yield 'blocks', blocks, [spec.BeaconBlock]
+ yield 'post', state
+@never_bls
+@spec_state_test
def test_finality_rule_1(state):
# get past first two epochs that finality does not run on
next_epoch(state)
+ apply_empty_block(state)
next_epoch(state)
+ apply_empty_block(state)
- pre_state = deepcopy(state)
- test_state = deepcopy(state)
+ yield 'pre', state
blocks = []
for epoch in range(3):
- prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, False, True)
+ prev_state, new_blocks, state = next_epoch_with_attestations(state, False, True)
blocks += new_blocks
if epoch == 0:
- check_finality(test_state, prev_state, True, False, False)
+ check_finality(state, prev_state, True, False, False)
elif epoch == 1:
- check_finality(test_state, prev_state, True, True, False)
+ check_finality(state, prev_state, True, True, False)
elif epoch == 2:
# finalized by rule 1
- check_finality(test_state, prev_state, True, True, True)
- assert test_state.finalized_epoch == prev_state.previous_justified_epoch
- assert test_state.finalized_root == prev_state.previous_justified_root
+ check_finality(state, prev_state, True, True, True)
+ assert state.finalized_epoch == prev_state.previous_justified_epoch
+ assert state.finalized_root == prev_state.previous_justified_root
- return pre_state, blocks, test_state
+ yield 'blocks', blocks, [spec.BeaconBlock]
+ yield 'post', state
+@never_bls
+@spec_state_test
def test_finality_rule_2(state):
# get past first two epochs that finality does not run on
next_epoch(state)
+ apply_empty_block(state)
next_epoch(state)
+ apply_empty_block(state)
- pre_state = deepcopy(state)
- test_state = deepcopy(state)
+ yield 'pre', state
blocks = []
for epoch in range(3):
if epoch == 0:
- prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, True, False)
- check_finality(test_state, prev_state, True, False, False)
+ prev_state, new_blocks, state = next_epoch_with_attestations(state, True, False)
+ check_finality(state, prev_state, True, False, False)
elif epoch == 1:
- prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, False, False)
- check_finality(test_state, prev_state, False, True, False)
+ prev_state, new_blocks, state = next_epoch_with_attestations(state, False, False)
+ check_finality(state, prev_state, False, True, False)
elif epoch == 2:
- prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, False, True)
+ prev_state, new_blocks, state = next_epoch_with_attestations(state, False, True)
# finalized by rule 2
- check_finality(test_state, prev_state, True, False, True)
- assert test_state.finalized_epoch == prev_state.previous_justified_epoch
- assert test_state.finalized_root == prev_state.previous_justified_root
+ check_finality(state, prev_state, True, False, True)
+ assert state.finalized_epoch == prev_state.previous_justified_epoch
+ assert state.finalized_root == prev_state.previous_justified_root
blocks += new_blocks
- return pre_state, blocks, test_state
+ yield 'blocks', blocks, [spec.BeaconBlock]
+ yield 'post', state
+@never_bls
+@spec_state_test
def test_finality_rule_3(state):
"""
Test scenario described here
https://github.com/ethereum/eth2.0-specs/issues/611#issuecomment-463612892
"""
-
# get past first two epochs that finality does not run on
next_epoch(state)
+ apply_empty_block(state)
next_epoch(state)
+ apply_empty_block(state)
- pre_state = deepcopy(state)
- test_state = deepcopy(state)
+ yield 'pre', state
blocks = []
- prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, True, False)
+ prev_state, new_blocks, state = next_epoch_with_attestations(state, True, False)
blocks += new_blocks
- check_finality(test_state, prev_state, True, False, False)
+ check_finality(state, prev_state, True, False, False)
# In epoch N, JE is set to N, prev JE is set to N-1
- prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, True, False)
+ prev_state, new_blocks, state = next_epoch_with_attestations(state, True, False)
blocks += new_blocks
- check_finality(test_state, prev_state, True, True, True)
+ check_finality(state, prev_state, True, True, True)
# In epoch N+1, JE is N, prev JE is N-1, and not enough messages get in to do anything
- prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, False, False)
+ prev_state, new_blocks, state = next_epoch_with_attestations(state, False, False)
blocks += new_blocks
- check_finality(test_state, prev_state, False, True, False)
+ check_finality(state, prev_state, False, True, False)
# In epoch N+2, JE is N, prev JE is N, and enough messages from the previous epoch get in to justify N+1.
# N+1 now becomes the JE. Not enough messages from epoch N+2 itself get in to justify N+2
- prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, False, True)
+ prev_state, new_blocks, state = next_epoch_with_attestations(state, False, True)
blocks += new_blocks
# rule 2
- check_finality(test_state, prev_state, True, False, True)
+ check_finality(state, prev_state, True, False, True)
# In epoch N+3, LJE is N+1, prev LJE is N, and enough messages get in to justify epochs N+2 and N+3.
- prev_state, new_blocks, test_state = next_epoch_with_attestations(test_state, True, True)
+ prev_state, new_blocks, state = next_epoch_with_attestations(state, True, True)
blocks += new_blocks
# rule 3
- check_finality(test_state, prev_state, True, True, True)
- assert test_state.finalized_epoch == prev_state.current_justified_epoch
- assert test_state.finalized_root == prev_state.current_justified_root
+ check_finality(state, prev_state, True, True, True)
+ assert state.finalized_epoch == prev_state.current_justified_epoch
+ assert state.finalized_root == prev_state.current_justified_root
- return pre_state, blocks, test_state
+ yield 'blocks', blocks, [spec.BeaconBlock]
+ yield 'post', state
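Analogous to the slots replay sketch above, the refactored finality tests now emit `pre`, a list of `blocks` (hinted as `spec.BeaconBlock`), and `post` instead of returning them. A hedged consumer sketch (the `replay_block_vector` name is illustrative, not part of this diff) replays the blocks with the same `state_transition` call the tests use internally:

```python
from copy import deepcopy

import eth2spec.phase0.spec as spec


def replay_block_vector(pre_state, blocks, post_state):
    """Apply each block to a copy of `pre_state` and check the result against `post_state`."""
    state = deepcopy(pre_state)
    for block in blocks:
        spec.state_transition(state, block)
    assert state.hash_tree_root() == post_state.hash_tree_root()
```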
diff --git a/test_libs/pyspec/eth2spec/test/utils.py b/test_libs/pyspec/eth2spec/test/utils.py
new file mode 100644
index 0000000000..b61801c3dd
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/test/utils.py
@@ -0,0 +1,80 @@
+from typing import Dict, Any, Callable, Iterable
+from eth2spec.debug.encode import encode
+
+
+def spectest(description: str = None):
+ def runner(fn):
+ # this wraps the function to hide that it is actually yielding data, instead of returning once.
+ def entry(*args, **kw):
+ # check generator mode, may be None/else.
+ # "pop" removes it, so it is not passed to the inner function.
+ if kw.pop('generator_mode', False) is True:
+ out = {}
+ if description is None:
+ # fall back on function name for test description
+ name = fn.__name__
+ if name.startswith('test_'):
+ name = name[5:]
+ out['description'] = name
+ else:
+ # description can be explicit
+ out['description'] = description
+ has_contents = False
+ # put all generated data into a dict.
+ for data in fn(*args, **kw):
+ has_contents = True
+ # If there is a type argument, encode it as that type.
+ if len(data) == 3:
+ (key, value, typ) = data
+ out[key] = encode(value, typ)
+ else:
+ # Otherwise, try to infer the type, but keep it as-is if it's not an SSZ container.
+ (key, value) = data
+ if hasattr(value.__class__, 'fields'):
+ out[key] = encode(value, value.__class__)
+ else:
+ out[key] = value
+ if has_contents:
+ return out
+ else:
+ return None
+ else:
+ # just complete the function, ignore all yielded data, we are not using it
+ for _ in fn(*args, **kw):
+ continue
+ return None
+ return entry
+ return runner
+
+
+def with_tags(tags: Dict[str, Any]):
+ """
+ Decorator factory, adds tags (key, value) pairs to the output of the function.
+ Useful to build test-vector annotations with.
+ This decorator is applied on top of the ``spectest`` decorator.
+ :param tags: dict of tags
+ :return: Decorator.
+ """
+ def runner(fn):
+ def entry(*args, **kw):
+ fn_out = fn(*args, **kw)
+ # do not add tags if the function is not returning a dict at all (i.e. not in generator mode)
+ if fn_out is None:
+ return None
+ return {**tags, **fn_out}
+ return entry
+ return runner
+
+
+def with_args(create_args: Callable[[], Iterable[Any]]):
+ """
+ Decorator factory, adds given extra arguments to the decorated function.
+ :param create_args: function to create arguments with.
+ :return: Decorator.
+ """
+ def runner(fn):
+ # this wraps the function to prepend the arguments produced by create_args to every call.
+ def entry(*args, **kw):
+ return fn(*(list(create_args()) + list(args)), **kw)
+ return entry
+ return runner
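The `spectest` decorator above drives both plain test runs and test-vector generation. A small usage sketch (self-contained apart from assuming the `eth2spec` package from this diff is installed) shows how yielded `(key, value)` pairs are collected into a dict when `generator_mode=True`, and simply drained otherwise:

```python
from eth2spec.test.utils import spectest


@spectest()
def test_example():
    # Plain values without SSZ `fields` are stored as-is; 3-tuples would be SSZ-encoded.
    yield 'pre', {'slot': 0}
    yield 'slots', 1
    yield 'post', {'slot': 1}


# Generator mode: yielded data is gathered into an output dict, keyed by the yield names.
out = test_example(generator_mode=True)
assert out == {'description': 'example', 'pre': {'slot': 0}, 'slots': 1, 'post': {'slot': 1}}

# Normal mode: the generator is exhausted and nothing is returned.
assert test_example() is None
```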
diff --git a/test_libs/pyspec/eth2spec/utils/bls.py b/test_libs/pyspec/eth2spec/utils/bls.py
new file mode 100644
index 0000000000..52f1fed632
--- /dev/null
+++ b/test_libs/pyspec/eth2spec/utils/bls.py
@@ -0,0 +1,46 @@
+from py_ecc import bls
+
+# Flag to enable or disable BLS. Used for testing; do not disable BLS in production unless you know what you are doing.
+bls_active = True
+
+STUB_SIGNATURE = b'\x11' * 96
+STUB_PUBKEY = b'\x22' * 48
+
+
+def only_with_bls(alt_return=None):
+ """
+ Decorator factory to make a function only run when BLS is active; otherwise the ``alt_return`` value is returned.
+ """
+ def runner(fn):
+ def entry(*args, **kw):
+ if bls_active:
+ return fn(*args, **kw)
+ else:
+ return alt_return
+ return entry
+ return runner
+
+
+@only_with_bls(alt_return=True)
+def bls_verify(pubkey, message_hash, signature, domain):
+ return bls.verify(message_hash=message_hash, pubkey=pubkey, signature=signature, domain=domain)
+
+
+@only_with_bls(alt_return=True)
+def bls_verify_multiple(pubkeys, message_hashes, signature, domain):
+ return bls.verify_multiple(pubkeys, message_hashes, signature, domain)
+
+
+@only_with_bls(alt_return=STUB_PUBKEY)
+def bls_aggregate_pubkeys(pubkeys):
+ return bls.aggregate_pubkeys(pubkeys)
+
+
+@only_with_bls(alt_return=STUB_SIGNATURE)
+def bls_aggregate_signatures(signatures):
+ return bls.aggregate_signatures(signatures)
+
+
+@only_with_bls(alt_return=STUB_SIGNATURE)
+def bls_sign(message_hash, privkey, domain):
+ return bls.sign(message_hash=message_hash, privkey=privkey, domain=domain)
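The module-level `bls_active` flag is what lets test decorators such as `never_bls` skip real cryptography. A minimal sketch, assuming `py_ecc` is installed and the `eth2spec` package from this diff is importable, of how flipping the flag swaps real BLS operations for the fixed stubs:

```python
from eth2spec.utils import bls

bls.bls_active = False  # disable BLS, as the BLS-ignoring test decorators do

# With BLS off, signing and verification short-circuit to the stub values.
assert bls.bls_sign(message_hash=b'\x00' * 32, privkey=42, domain=0) == b'\x11' * 96
assert bls.bls_verify(pubkey=b'\x22' * 48, message_hash=b'\x00' * 32,
                      signature=b'\x11' * 96, domain=0) is True

bls.bls_active = True  # restore real BLS afterwards
```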
diff --git a/test_libs/pyspec/eth2spec/utils/bls_stub.py b/test_libs/pyspec/eth2spec/utils/bls_stub.py
deleted file mode 100644
index ae97de175b..0000000000
--- a/test_libs/pyspec/eth2spec/utils/bls_stub.py
+++ /dev/null
@@ -1,12 +0,0 @@
-
-
-def bls_verify(pubkey, message_hash, signature, domain):
- return True
-
-
-def bls_verify_multiple(pubkeys, message_hashes, signature, domain):
- return True
-
-
-def bls_aggregate_pubkeys(pubkeys):
- return b'\x42' * 48
diff --git a/test_libs/pyspec/eth2spec/utils/hash_function.py b/test_libs/pyspec/eth2spec/utils/hash_function.py
index 3fee63d82d..acd13edc40 100644
--- a/test_libs/pyspec/eth2spec/utils/hash_function.py
+++ b/test_libs/pyspec/eth2spec/utils/hash_function.py
@@ -1,6 +1,4 @@
from hashlib import sha256
-# from eth_utils import keccak
def hash(x): return sha256(x).digest()
-# def hash(x): return keccak(x)
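With the commented-out keccak alternative removed, `hash` is plain SHA-256. A quick sanity check (assuming the pyspec package is importable):

```python
from hashlib import sha256

from eth2spec.utils.hash_function import hash

assert hash(b'abc') == sha256(b'abc').digest()
```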
diff --git a/test_libs/pyspec/tests/block_processing/test_process_attestation.py b/test_libs/pyspec/tests/block_processing/test_process_attestation.py
deleted file mode 100644
index 763178717a..0000000000
--- a/test_libs/pyspec/tests/block_processing/test_process_attestation.py
+++ /dev/null
@@ -1,195 +0,0 @@
-from copy import deepcopy
-import pytest
-
-import eth2spec.phase0.spec as spec
-
-from eth2spec.phase0.spec import (
- get_current_epoch,
- process_attestation,
- slot_to_epoch,
- state_transition,
-)
-from tests.helpers import (
- build_empty_block_for_next_slot,
- get_valid_attestation,
- next_epoch,
- next_slot,
-)
-
-
-# mark entire file as 'attestations'
-pytestmark = pytest.mark.attestations
-
-
-def run_attestation_processing(state, attestation, valid=True):
- """
- Run ``process_attestation`` returning the pre and post state.
- If ``valid == False``, run expecting ``AssertionError``
- """
- post_state = deepcopy(state)
-
- if not valid:
- with pytest.raises(AssertionError):
- process_attestation(post_state, attestation)
- return state, None
-
- process_attestation(post_state, attestation)
-
- current_epoch = get_current_epoch(state)
- if attestation.data.target_epoch == current_epoch:
- assert len(post_state.current_epoch_attestations) == len(state.current_epoch_attestations) + 1
- else:
- assert len(post_state.previous_epoch_attestations) == len(state.previous_epoch_attestations) + 1
-
- return state, post_state
-
-
-def test_success(state):
- attestation = get_valid_attestation(state)
- state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
-
- pre_state, post_state = run_attestation_processing(state, attestation)
-
- return pre_state, attestation, post_state
-
-
-def test_success_prevous_epoch(state):
- attestation = get_valid_attestation(state)
- block = build_empty_block_for_next_slot(state)
- block.slot = state.slot + spec.SLOTS_PER_EPOCH
- state_transition(state, block)
-
- pre_state, post_state = run_attestation_processing(state, attestation)
-
- return pre_state, attestation, post_state
-
-
-def test_success_since_max_epochs_per_crosslink(state):
- for _ in range(spec.MAX_EPOCHS_PER_CROSSLINK + 2):
- next_epoch(state)
-
- attestation = get_valid_attestation(state)
- data = attestation.data
- assert data.crosslink.end_epoch - data.crosslink.start_epoch == spec.MAX_EPOCHS_PER_CROSSLINK
-
- for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY):
- next_slot(state)
-
- pre_state, post_state = run_attestation_processing(state, attestation)
-
- return pre_state, attestation, post_state
-
-
-def test_before_inclusion_delay(state):
- attestation = get_valid_attestation(state)
- # do not increment slot to allow for inclusion delay
-
- pre_state, post_state = run_attestation_processing(state, attestation, False)
-
- return pre_state, attestation, post_state
-
-
-def test_after_epoch_slots(state):
- attestation = get_valid_attestation(state)
- block = build_empty_block_for_next_slot(state)
- # increment past latest inclusion slot
- block.slot = state.slot + spec.SLOTS_PER_EPOCH + 1
- state_transition(state, block)
-
- pre_state, post_state = run_attestation_processing(state, attestation, False)
-
- return pre_state, attestation, post_state
-
-
-def test_bad_source_epoch(state):
- attestation = get_valid_attestation(state)
- state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
-
- attestation.data.source_epoch += 10
-
- pre_state, post_state = run_attestation_processing(state, attestation, False)
-
- return pre_state, attestation, post_state
-
-
-def test_bad_source_root(state):
- attestation = get_valid_attestation(state)
- state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
-
- attestation.data.source_root = b'\x42' * 32
-
- pre_state, post_state = run_attestation_processing(state, attestation, False)
-
- return pre_state, attestation, post_state
-
-
-def test_non_zero_crosslink_data_root(state):
- attestation = get_valid_attestation(state)
- state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
-
- attestation.data.crosslink.data_root = b'\x42' * 32
-
- pre_state, post_state = run_attestation_processing(state, attestation, False)
-
- return pre_state, attestation, post_state
-
-
-def test_bad_previous_crosslink(state):
- next_epoch(state)
- attestation = get_valid_attestation(state)
- for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY):
- next_slot(state)
-
- attestation.data.crosslink.parent_root = b'\x27' * 32
-
- pre_state, post_state = run_attestation_processing(state, attestation, False)
-
- return pre_state, attestation, post_state
-
-
-def test_bad_crosslink_start_epoch(state):
- next_epoch(state)
- attestation = get_valid_attestation(state)
- for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY):
- next_slot(state)
-
- attestation.data.crosslink.start_epoch += 1
-
- pre_state, post_state = run_attestation_processing(state, attestation, False)
-
- return pre_state, attestation, post_state
-
-
-def test_bad_crosslink_end_epoch(state):
- next_epoch(state)
- attestation = get_valid_attestation(state)
- for _ in range(spec.MIN_ATTESTATION_INCLUSION_DELAY):
- next_slot(state)
-
- attestation.data.crosslink.end_epoch += 1
-
- pre_state, post_state = run_attestation_processing(state, attestation, False)
-
- return pre_state, attestation, post_state
-
-
-def test_non_empty_custody_bitfield(state):
- attestation = get_valid_attestation(state)
- state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
-
- attestation.custody_bitfield = deepcopy(attestation.aggregation_bitfield)
-
- pre_state, post_state = run_attestation_processing(state, attestation, False)
-
- return pre_state, attestation, post_state
-
-
-def test_empty_aggregation_bitfield(state):
- attestation = get_valid_attestation(state)
- state.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
-
- attestation.aggregation_bitfield = b'\x00' * len(attestation.aggregation_bitfield)
-
- pre_state, post_state = run_attestation_processing(state, attestation)
-
- return pre_state, attestation, post_state
diff --git a/test_libs/pyspec/tests/block_processing/test_process_attester_slashing.py b/test_libs/pyspec/tests/block_processing/test_process_attester_slashing.py
deleted file mode 100644
index 2ea16f13d9..0000000000
--- a/test_libs/pyspec/tests/block_processing/test_process_attester_slashing.py
+++ /dev/null
@@ -1,117 +0,0 @@
-from copy import deepcopy
-import pytest
-
-import eth2spec.phase0.spec as spec
-from eth2spec.phase0.spec import (
- get_beacon_proposer_index,
- process_attester_slashing,
-)
-from tests.helpers import (
- get_balance,
- get_valid_attester_slashing,
- next_epoch,
-)
-
-# mark entire file as 'attester_slashing'
-pytestmark = pytest.mark.attester_slashings
-
-
-def run_attester_slashing_processing(state, attester_slashing, valid=True):
- """
- Run ``process_attester_slashing`` returning the pre and post state.
- If ``valid == False``, run expecting ``AssertionError``
- """
- post_state = deepcopy(state)
-
- if not valid:
- with pytest.raises(AssertionError):
- process_attester_slashing(post_state, attester_slashing)
- return state, None
-
- process_attester_slashing(post_state, attester_slashing)
-
- slashed_index = attester_slashing.attestation_1.custody_bit_0_indices[0]
- slashed_validator = post_state.validator_registry[slashed_index]
- assert slashed_validator.slashed
- assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
- assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
- # lost whistleblower reward
- assert (
- get_balance(post_state, slashed_index) <
- get_balance(state, slashed_index)
- )
- proposer_index = get_beacon_proposer_index(state)
- # gained whistleblower reward
- assert (
- get_balance(post_state, proposer_index) >
- get_balance(state, proposer_index)
- )
-
- return state, post_state
-
-
-def test_success_double(state):
- attester_slashing = get_valid_attester_slashing(state)
-
- pre_state, post_state = run_attester_slashing_processing(state, attester_slashing)
-
- return pre_state, attester_slashing, post_state
-
-
-def test_success_surround(state):
- next_epoch(state)
- state.current_justified_epoch += 1
- attester_slashing = get_valid_attester_slashing(state)
-
- # set attestion1 to surround attestation 2
- attester_slashing.attestation_1.data.source_epoch = attester_slashing.attestation_2.data.source_epoch - 1
- attester_slashing.attestation_1.data.target_epoch = attester_slashing.attestation_2.data.target_epoch + 1
-
- pre_state, post_state = run_attester_slashing_processing(state, attester_slashing)
-
- return pre_state, attester_slashing, post_state
-
-
-def test_same_data(state):
- attester_slashing = get_valid_attester_slashing(state)
-
- attester_slashing.attestation_1.data = attester_slashing.attestation_2.data
-
- pre_state, post_state = run_attester_slashing_processing(state, attester_slashing, False)
-
- return pre_state, attester_slashing, post_state
-
-
-def test_no_double_or_surround(state):
- attester_slashing = get_valid_attester_slashing(state)
-
- attester_slashing.attestation_1.data.target_epoch += 1
-
- pre_state, post_state = run_attester_slashing_processing(state, attester_slashing, False)
-
- return pre_state, attester_slashing, post_state
-
-
-def test_participants_already_slashed(state):
- attester_slashing = get_valid_attester_slashing(state)
-
- # set all indices to slashed
- attestation_1 = attester_slashing.attestation_1
- validator_indices = attestation_1.custody_bit_0_indices + attestation_1.custody_bit_1_indices
- for index in validator_indices:
- state.validator_registry[index].slashed = True
-
- pre_state, post_state = run_attester_slashing_processing(state, attester_slashing, False)
-
- return pre_state, attester_slashing, post_state
-
-
-def test_custody_bit_0_and_1(state):
- attester_slashing = get_valid_attester_slashing(state)
-
- attester_slashing.attestation_1.custody_bit_1_indices = (
- attester_slashing.attestation_1.custody_bit_0_indices
- )
- pre_state, post_state = run_attester_slashing_processing(state, attester_slashing, False)
-
- return pre_state, attester_slashing, post_state
diff --git a/test_libs/pyspec/tests/block_processing/test_process_block_header.py b/test_libs/pyspec/tests/block_processing/test_process_block_header.py
deleted file mode 100644
index 32b409e5ab..0000000000
--- a/test_libs/pyspec/tests/block_processing/test_process_block_header.py
+++ /dev/null
@@ -1,76 +0,0 @@
-from copy import deepcopy
-import pytest
-
-
-from eth2spec.phase0.spec import (
- get_beacon_proposer_index,
- process_slot,
- process_block_header,
-)
-from tests.helpers import (
- advance_slot,
- build_empty_block_for_next_slot,
- next_slot,
-)
-
-# mark entire file as 'header'
-pytestmark = pytest.mark.header
-
-
-def prepare_state_for_header_processing(state):
- process_slot(state)
- advance_slot(state)
-
-
-def run_block_header_processing(state, block, valid=True):
- """
- Run ``process_block_header`` returning the pre and post state.
- If ``valid == False``, run expecting ``AssertionError``
- """
- prepare_state_for_header_processing(state)
- post_state = deepcopy(state)
-
- if not valid:
- with pytest.raises(AssertionError):
- process_block_header(post_state, block)
- return state, None
-
- process_block_header(post_state, block)
- return state, post_state
-
-
-def test_success(state):
- block = build_empty_block_for_next_slot(state)
- pre_state, post_state = run_block_header_processing(state, block)
- return state, block, post_state
-
-
-def test_invalid_slot(state):
- block = build_empty_block_for_next_slot(state)
- block.slot = state.slot + 2 # invalid slot
-
- pre_state, post_state = run_block_header_processing(state, block, valid=False)
- return pre_state, block, None
-
-
-def test_invalid_parent_block_root(state):
- block = build_empty_block_for_next_slot(state)
- block.parent_root = b'\12' * 32 # invalid prev root
-
- pre_state, post_state = run_block_header_processing(state, block, valid=False)
- return pre_state, block, None
-
-
-def test_proposer_slashed(state):
- # use stub state to get proposer index of next slot
- stub_state = deepcopy(state)
- next_slot(stub_state)
- proposer_index = get_beacon_proposer_index(stub_state)
-
- # set proposer to slashed
- state.validator_registry[proposer_index].slashed = True
-
- block = build_empty_block_for_next_slot(state)
-
- pre_state, post_state = run_block_header_processing(state, block, valid=False)
- return pre_state, block, None
diff --git a/test_libs/pyspec/tests/block_processing/test_process_deposit.py b/test_libs/pyspec/tests/block_processing/test_process_deposit.py
deleted file mode 100644
index bbfb390efb..0000000000
--- a/test_libs/pyspec/tests/block_processing/test_process_deposit.py
+++ /dev/null
@@ -1,141 +0,0 @@
-from copy import deepcopy
-import pytest
-
-import eth2spec.phase0.spec as spec
-
-from eth2spec.phase0.spec import (
- ZERO_HASH,
- process_deposit,
-)
-from tests.helpers import (
- get_balance,
- build_deposit,
- privkeys,
- pubkeys,
-)
-
-
-# mark entire file as 'deposits'
-pytestmark = pytest.mark.deposits
-
-
-def test_success(state):
- pre_state = deepcopy(state)
- # fill previous deposits with zero-hash
- deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry)
-
- index = len(deposit_data_leaves)
- pubkey = pubkeys[index]
- privkey = privkeys[index]
- deposit, root, deposit_data_leaves = build_deposit(
- pre_state,
- deposit_data_leaves,
- pubkey,
- privkey,
- spec.MAX_EFFECTIVE_BALANCE,
- )
-
- pre_state.latest_eth1_data.deposit_root = root
- pre_state.latest_eth1_data.deposit_count = len(deposit_data_leaves)
-
- post_state = deepcopy(pre_state)
-
- process_deposit(post_state, deposit)
-
- assert len(post_state.validator_registry) == len(state.validator_registry) + 1
- assert len(post_state.balances) == len(state.balances) + 1
- assert post_state.validator_registry[index].pubkey == pubkeys[index]
- assert get_balance(post_state, index) == spec.MAX_EFFECTIVE_BALANCE
- assert post_state.deposit_index == post_state.latest_eth1_data.deposit_count
-
- return pre_state, deposit, post_state
-
-
-def test_success_top_up(state):
- pre_state = deepcopy(state)
- deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry)
-
- validator_index = 0
- amount = spec.MAX_EFFECTIVE_BALANCE // 4
- pubkey = pubkeys[validator_index]
- privkey = privkeys[validator_index]
- deposit, root, deposit_data_leaves = build_deposit(
- pre_state,
- deposit_data_leaves,
- pubkey,
- privkey,
- amount,
- )
-
- pre_state.latest_eth1_data.deposit_root = root
- pre_state.latest_eth1_data.deposit_count = len(deposit_data_leaves)
- pre_balance = get_balance(pre_state, validator_index)
-
- post_state = deepcopy(pre_state)
-
- process_deposit(post_state, deposit)
-
- assert len(post_state.validator_registry) == len(state.validator_registry)
- assert len(post_state.balances) == len(state.balances)
- assert post_state.deposit_index == post_state.latest_eth1_data.deposit_count
- assert get_balance(post_state, validator_index) == pre_balance + amount
-
- return pre_state, deposit, post_state
-
-
-def test_wrong_index(state):
- pre_state = deepcopy(state)
- deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry)
-
- index = len(deposit_data_leaves)
- pubkey = pubkeys[index]
- privkey = privkeys[index]
- deposit, root, deposit_data_leaves = build_deposit(
- pre_state,
- deposit_data_leaves,
- pubkey,
- privkey,
- spec.MAX_EFFECTIVE_BALANCE,
- )
-
- # mess up deposit_index
- deposit.index = pre_state.deposit_index + 1
-
- pre_state.latest_eth1_data.deposit_root = root
- pre_state.latest_eth1_data.deposit_count = len(deposit_data_leaves)
-
- post_state = deepcopy(pre_state)
-
- with pytest.raises(AssertionError):
- process_deposit(post_state, deposit)
-
- return pre_state, deposit, None
-
-
-def test_bad_merkle_proof(state):
- pre_state = deepcopy(state)
- deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry)
-
- index = len(deposit_data_leaves)
- pubkey = pubkeys[index]
- privkey = privkeys[index]
- deposit, root, deposit_data_leaves = build_deposit(
- pre_state,
- deposit_data_leaves,
- pubkey,
- privkey,
- spec.MAX_EFFECTIVE_BALANCE,
- )
-
- # mess up merkle branch
- deposit.proof[-1] = spec.ZERO_HASH
-
- pre_state.latest_eth1_data.deposit_root = root
- pre_state.latest_eth1_data.deposit_count = len(deposit_data_leaves)
-
- post_state = deepcopy(pre_state)
-
- with pytest.raises(AssertionError):
- process_deposit(post_state, deposit)
-
- return pre_state, deposit, None
diff --git a/test_libs/pyspec/tests/block_processing/test_process_proposer_slashing.py b/test_libs/pyspec/tests/block_processing/test_process_proposer_slashing.py
deleted file mode 100644
index 4752210366..0000000000
--- a/test_libs/pyspec/tests/block_processing/test_process_proposer_slashing.py
+++ /dev/null
@@ -1,96 +0,0 @@
-from copy import deepcopy
-import pytest
-
-import eth2spec.phase0.spec as spec
-from eth2spec.phase0.spec import (
- get_current_epoch,
- process_proposer_slashing,
-)
-from tests.helpers import (
- get_balance,
- get_valid_proposer_slashing,
-)
-
-# mark entire file as 'proposer_slashings'
-pytestmark = pytest.mark.proposer_slashings
-
-
-def run_proposer_slashing_processing(state, proposer_slashing, valid=True):
- """
- Run ``process_proposer_slashing`` returning the pre and post state.
- If ``valid == False``, run expecting ``AssertionError``
- """
- post_state = deepcopy(state)
-
- if not valid:
- with pytest.raises(AssertionError):
- process_proposer_slashing(post_state, proposer_slashing)
- return state, None
-
- process_proposer_slashing(post_state, proposer_slashing)
-
- slashed_validator = post_state.validator_registry[proposer_slashing.proposer_index]
- assert slashed_validator.slashed
- assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
- assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
- # lost whistleblower reward
- assert (
- get_balance(post_state, proposer_slashing.proposer_index) <
- get_balance(state, proposer_slashing.proposer_index)
- )
-
- return state, post_state
-
-
-def test_success(state):
- proposer_slashing = get_valid_proposer_slashing(state)
-
- pre_state, post_state = run_proposer_slashing_processing(state, proposer_slashing)
-
- return pre_state, proposer_slashing, post_state
-
-
-def test_epochs_are_different(state):
- proposer_slashing = get_valid_proposer_slashing(state)
-
- # set slots to be in different epochs
- proposer_slashing.header_2.slot += spec.SLOTS_PER_EPOCH
-
- pre_state, post_state = run_proposer_slashing_processing(state, proposer_slashing, False)
-
- return pre_state, proposer_slashing, post_state
-
-
-def test_headers_are_same(state):
- proposer_slashing = get_valid_proposer_slashing(state)
-
- # set headers to be the same
- proposer_slashing.header_2 = proposer_slashing.header_1
-
- pre_state, post_state = run_proposer_slashing_processing(state, proposer_slashing, False)
-
- return pre_state, proposer_slashing, post_state
-
-
-def test_proposer_is_slashed(state):
- proposer_slashing = get_valid_proposer_slashing(state)
-
- # set proposer to slashed
- state.validator_registry[proposer_slashing.proposer_index].slashed = True
-
- pre_state, post_state = run_proposer_slashing_processing(state, proposer_slashing, False)
-
- return pre_state, proposer_slashing, post_state
-
-
-def test_proposer_is_withdrawn(state):
- proposer_slashing = get_valid_proposer_slashing(state)
-
- # set proposer withdrawable_epoch in past
- current_epoch = get_current_epoch(state)
- proposer_index = proposer_slashing.proposer_index
- state.validator_registry[proposer_index].withdrawable_epoch = current_epoch - 1
-
- pre_state, post_state = run_proposer_slashing_processing(state, proposer_slashing, False)
-
- return pre_state, proposer_slashing, post_state
diff --git a/test_libs/pyspec/tests/block_processing/test_process_transfer.py b/test_libs/pyspec/tests/block_processing/test_process_transfer.py
deleted file mode 100644
index 0eeaa77929..0000000000
--- a/test_libs/pyspec/tests/block_processing/test_process_transfer.py
+++ /dev/null
@@ -1,141 +0,0 @@
-from copy import deepcopy
-import pytest
-
-import eth2spec.phase0.spec as spec
-
-from eth2spec.phase0.spec import (
- get_active_validator_indices,
- get_beacon_proposer_index,
- get_current_epoch,
- process_transfer,
-)
-from tests.helpers import (
- get_valid_transfer,
- next_epoch,
-)
-
-
-# mark entire file as 'transfers'
-pytestmark = pytest.mark.transfers
-
-
-def run_transfer_processing(state, transfer, valid=True):
- """
- Run ``process_transfer`` returning the pre and post state.
- If ``valid == False``, run expecting ``AssertionError``
- """
- post_state = deepcopy(state)
-
- if not valid:
- with pytest.raises(AssertionError):
- process_transfer(post_state, transfer)
- return state, None
-
-
- process_transfer(post_state, transfer)
-
- proposer_index = get_beacon_proposer_index(state)
- pre_transfer_sender_balance = state.balances[transfer.sender]
- pre_transfer_recipient_balance = state.balances[transfer.recipient]
- pre_transfer_proposer_balance = state.balances[proposer_index]
- sender_balance = post_state.balances[transfer.sender]
- recipient_balance = post_state.balances[transfer.recipient]
- assert sender_balance == pre_transfer_sender_balance - transfer.amount - transfer.fee
- assert recipient_balance == pre_transfer_recipient_balance + transfer.amount
- assert post_state.balances[proposer_index] == pre_transfer_proposer_balance + transfer.fee
-
- return state, post_state
-
-
-def test_success_non_activated(state):
- transfer = get_valid_transfer(state)
- # un-activate so validator can transfer
- state.validator_registry[transfer.sender].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
-
- pre_state, post_state = run_transfer_processing(state, transfer)
-
- return pre_state, transfer, post_state
-
-
-def test_success_withdrawable(state):
- next_epoch(state)
-
- transfer = get_valid_transfer(state)
-
- # withdrawable_epoch in past so can transfer
- state.validator_registry[transfer.sender].withdrawable_epoch = get_current_epoch(state) - 1
-
- pre_state, post_state = run_transfer_processing(state, transfer)
-
- return pre_state, transfer, post_state
-
-
-def test_success_active_above_max_effective(state):
- sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1]
- amount = spec.MAX_EFFECTIVE_BALANCE // 32
- state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE + amount
- transfer = get_valid_transfer(state, sender_index=sender_index, amount=amount, fee=0)
-
- pre_state, post_state = run_transfer_processing(state, transfer)
-
- return pre_state, transfer, post_state
-
-
-def test_active_but_transfer_past_effective_balance(state):
- sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1]
- amount = spec.MAX_EFFECTIVE_BALANCE // 32
- state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE
- transfer = get_valid_transfer(state, sender_index=sender_index, amount=amount, fee=0)
-
- pre_state, post_state = run_transfer_processing(state, transfer, False)
-
- return pre_state, transfer, post_state
-
-
-def test_incorrect_slot(state):
- transfer = get_valid_transfer(state, slot=state.slot+1)
- # un-activate so validator can transfer
- state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
-
- pre_state, post_state = run_transfer_processing(state, transfer, False)
-
- return pre_state, transfer, post_state
-
-
-def test_insufficient_balance(state):
- sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1]
- amount = spec.MAX_EFFECTIVE_BALANCE
- state.balances[sender_index] = spec.MAX_EFFECTIVE_BALANCE
- transfer = get_valid_transfer(state, sender_index=sender_index, amount=amount + 1, fee=0)
-
- # un-activate so validator can transfer
- state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
-
- pre_state, post_state = run_transfer_processing(state, transfer, False)
-
- return pre_state, transfer, post_state
-
-
-def test_no_dust(state):
- sender_index = get_active_validator_indices(state, get_current_epoch(state))[-1]
- balance = state.balances[sender_index]
- transfer = get_valid_transfer(state, sender_index=sender_index, amount=balance - spec.MIN_DEPOSIT_AMOUNT + 1, fee=0)
-
- # un-activate so validator can transfer
- state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
-
- pre_state, post_state = run_transfer_processing(state, transfer, False)
-
- return pre_state, transfer, post_state
-
-
-def test_invalid_pubkey(state):
- transfer = get_valid_transfer(state)
- state.validator_registry[transfer.sender].withdrawal_credentials = spec.ZERO_HASH
-
- # un-activate so validator can transfer
- state.validator_registry[transfer.sender].activation_epoch = spec.FAR_FUTURE_EPOCH
-
- pre_state, post_state = run_transfer_processing(state, transfer, False)
-
- return pre_state, transfer, post_state
diff --git a/test_libs/pyspec/tests/block_processing/test_voluntary_exit.py b/test_libs/pyspec/tests/block_processing/test_voluntary_exit.py
deleted file mode 100644
index c58c5238a9..0000000000
--- a/test_libs/pyspec/tests/block_processing/test_voluntary_exit.py
+++ /dev/null
@@ -1,163 +0,0 @@
-from copy import deepcopy
-import pytest
-
-import eth2spec.phase0.spec as spec
-
-from eth2spec.phase0.spec import (
- get_active_validator_indices,
- get_churn_limit,
- get_current_epoch,
- process_voluntary_exit,
-)
-from tests.helpers import (
- build_voluntary_exit,
- pubkey_to_privkey,
-)
-
-
-# mark entire file as 'voluntary_exits'
-pytestmark = pytest.mark.voluntary_exits
-
-
-def run_voluntary_exit_processing(state, voluntary_exit, valid=True):
- """
- Run ``process_voluntary_exit`` returning the pre and post state.
- If ``valid == False``, run expecting ``AssertionError``
- """
- post_state = deepcopy(state)
-
- if not valid:
- with pytest.raises(AssertionError):
- process_voluntary_exit(post_state, voluntary_exit)
- return state, None
-
- process_voluntary_exit(post_state, voluntary_exit)
-
- validator_index = voluntary_exit.validator_index
- assert state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH
- assert post_state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
-
- return state, post_state
-
-
-def test_success(state):
- # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit
- state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
-
- current_epoch = get_current_epoch(state)
- validator_index = get_active_validator_indices(state, current_epoch)[0]
- privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
-
- voluntary_exit = build_voluntary_exit(
- state,
- current_epoch,
- validator_index,
- privkey,
- )
-
- pre_state, post_state = run_voluntary_exit_processing(state, voluntary_exit)
- return pre_state, voluntary_exit, post_state
-
-
-def test_success_exit_queue(state):
- # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit
- state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
-
- current_epoch = get_current_epoch(state)
-
- # exit `MAX_EXITS_PER_EPOCH`
- initial_indices = get_active_validator_indices(state, current_epoch)[:get_churn_limit(state)]
- post_state = state
- for index in initial_indices:
- privkey = pubkey_to_privkey[state.validator_registry[index].pubkey]
- voluntary_exit = build_voluntary_exit(
- state,
- current_epoch,
- index,
- privkey,
- )
-
- pre_state, post_state = run_voluntary_exit_processing(post_state, voluntary_exit)
-
- # exit an additional validator
- validator_index = get_active_validator_indices(state, current_epoch)[-1]
- privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
- voluntary_exit = build_voluntary_exit(
- state,
- current_epoch,
- validator_index,
- privkey,
- )
-
- pre_state, post_state = run_voluntary_exit_processing(post_state, voluntary_exit)
-
- assert (
- post_state.validator_registry[validator_index].exit_epoch ==
- post_state.validator_registry[initial_indices[0]].exit_epoch + 1
- )
-
- return pre_state, voluntary_exit, post_state
-
-
-def test_validator_not_active(state):
- current_epoch = get_current_epoch(state)
- validator_index = get_active_validator_indices(state, current_epoch)[0]
- privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
-
- state.validator_registry[validator_index].activation_epoch = spec.FAR_FUTURE_EPOCH
-
- #
- # build and test voluntary exit
- #
- voluntary_exit = build_voluntary_exit(
- state,
- current_epoch,
- validator_index,
- privkey,
- )
-
- pre_state, post_state = run_voluntary_exit_processing(state, voluntary_exit, False)
- return pre_state, voluntary_exit, post_state
-
-
-def test_validator_already_exited(state):
- # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow validator able to exit
- state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
-
- current_epoch = get_current_epoch(state)
- validator_index = get_active_validator_indices(state, current_epoch)[0]
- privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
-
- # but validator already has exited
- state.validator_registry[validator_index].exit_epoch = current_epoch + 2
-
- voluntary_exit = build_voluntary_exit(
- state,
- current_epoch,
- validator_index,
- privkey,
- )
-
- pre_state, post_state = run_voluntary_exit_processing(state, voluntary_exit, False)
- return pre_state, voluntary_exit, post_state
-
-
-def test_validator_not_active_long_enough(state):
- current_epoch = get_current_epoch(state)
- validator_index = get_active_validator_indices(state, current_epoch)[0]
- privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
-
- voluntary_exit = build_voluntary_exit(
- state,
- current_epoch,
- validator_index,
- privkey,
- )
-
- assert (
- current_epoch - state.validator_registry[validator_index].activation_epoch <
- spec.PERSISTENT_COMMITTEE_PERIOD
- )
-
- pre_state, post_state = run_voluntary_exit_processing(state, voluntary_exit, False)
- return pre_state, voluntary_exit, post_state
diff --git a/test_libs/pyspec/tests/conftest.py b/test_libs/pyspec/tests/conftest.py
deleted file mode 100644
index 9840dc7b20..0000000000
--- a/test_libs/pyspec/tests/conftest.py
+++ /dev/null
@@ -1,36 +0,0 @@
-import pytest
-
-from eth2spec.phase0 import spec
-from preset_loader import loader
-
-from .helpers import (
- create_genesis_state,
-)
-
-
-def pytest_addoption(parser):
- parser.addoption(
- "--config", action="store", default="minimal", help="config: make the pyspec use the specified configuration"
- )
-
-
[email protected](autouse=True)
-def config(request):
- config_name = request.config.getoption("--config")
- presets = loader.load_presets('../../configs/', config_name)
- spec.apply_constants_preset(presets)
-
-
[email protected]
-def num_validators(config):
- return spec.SLOTS_PER_EPOCH * 8
-
-
[email protected]
-def deposit_data_leaves():
- return list()
-
-
[email protected]
-def state(num_validators, deposit_data_leaves):
- return create_genesis_state(num_validators, deposit_data_leaves)
diff --git a/test_libs/pyspec/tests/helpers.py b/test_libs/pyspec/tests/helpers.py
deleted file mode 100644
index a4849bfbbc..0000000000
--- a/test_libs/pyspec/tests/helpers.py
+++ /dev/null
@@ -1,429 +0,0 @@
-from copy import deepcopy
-
-from py_ecc import bls
-
-import eth2spec.phase0.spec as spec
-from eth2spec.utils.minimal_ssz import signing_root
-from eth2spec.phase0.spec import (
- # constants
- ZERO_HASH,
- MAX_EPOCHS_PER_CROSSLINK,
- # SSZ
- Attestation,
- AttestationData,
- AttestationDataAndCustodyBit,
- AttesterSlashing,
- BeaconBlock,
- BeaconBlockHeader,
- Crosslink,
- Deposit,
- DepositData,
- Eth1Data,
- ProposerSlashing,
- Transfer,
- VoluntaryExit,
- # functions
- convert_to_indexed,
- bls_domain,
- get_active_validator_indices,
- get_attesting_indices,
- get_block_root,
- get_block_root_at_slot,
- get_crosslink_committee,
- get_current_epoch,
- get_domain,
- get_epoch_start_slot,
- get_genesis_beacon_state,
- get_previous_epoch,
- get_shard_delta,
- hash_tree_root,
- slot_to_epoch,
- state_transition,
- verify_merkle_branch,
- hash,
-)
-from eth2spec.utils.merkle_minimal import (
- calc_merkle_tree_from_leaves,
- get_merkle_proof,
- get_merkle_root,
-)
-
-
-privkeys = [i + 1 for i in range(1024)]
-pubkeys = [bls.privtopub(privkey) for privkey in privkeys]
-pubkey_to_privkey = {pubkey: privkey for privkey, pubkey in zip(privkeys, pubkeys)}
-
-
-def advance_slot(state) -> None:
- state.slot += 1
-
-
-def get_balance(state, index):
- return state.balances[index]
-
-
-def set_bitfield_bit(bitfield, i):
- """
- Set the bit in ``bitfield`` at position ``i`` to ``1``.
- """
- byte_index = i // 8
- bit_index = i % 8
- return (
- bitfield[:byte_index] +
- bytes([bitfield[byte_index] | (1 << bit_index)]) +
- bitfield[byte_index+1:]
- )
-
-
-def create_mock_genesis_validator_deposits(num_validators, deposit_data_leaves=None):
- if not deposit_data_leaves:
- deposit_data_leaves = []
- signature = b'\x33' * 96
-
- deposit_data_list = []
- for i in range(num_validators):
- pubkey = pubkeys[i]
- deposit_data = DepositData(
- pubkey=pubkey,
- # insecurely use pubkey as withdrawal key as well
- withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(pubkey)[1:],
- amount=spec.MAX_EFFECTIVE_BALANCE,
- signature=signature,
- )
- item = deposit_data.hash_tree_root()
- deposit_data_leaves.append(item)
- tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves))
- root = get_merkle_root((tuple(deposit_data_leaves)))
- proof = list(get_merkle_proof(tree, item_index=i))
- assert verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, i, root)
- deposit_data_list.append(deposit_data)
-
- genesis_validator_deposits = []
- for i in range(num_validators):
- genesis_validator_deposits.append(Deposit(
- proof=list(get_merkle_proof(tree, item_index=i)),
- index=i,
- data=deposit_data_list[i]
- ))
- return genesis_validator_deposits, root
-
-
-def create_genesis_state(num_validators, deposit_data_leaves=None):
- initial_deposits, deposit_root = create_mock_genesis_validator_deposits(
- num_validators,
- deposit_data_leaves,
- )
- return get_genesis_beacon_state(
- initial_deposits,
- genesis_time=0,
- genesis_eth1_data=Eth1Data(
- deposit_root=deposit_root,
- deposit_count=len(initial_deposits),
- block_hash=spec.ZERO_HASH,
- ),
- )
-
-
-def build_empty_block_for_next_slot(state):
- empty_block = BeaconBlock()
- empty_block.slot = state.slot + 1
- empty_block.body.eth1_data.deposit_count = state.deposit_index
- previous_block_header = deepcopy(state.latest_block_header)
- if previous_block_header.state_root == spec.ZERO_HASH:
- previous_block_header.state_root = state.hash_tree_root()
- empty_block.parent_root = signing_root(previous_block_header)
- return empty_block
-
-
-def build_deposit_data(state, pubkey, privkey, amount):
- deposit_data = DepositData(
- pubkey=pubkey,
- # insecurely use pubkey as withdrawal key as well
- withdrawal_credentials=spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(pubkey)[1:],
- amount=amount,
- )
- signature = bls.sign(
- message_hash=signing_root(deposit_data),
- privkey=privkey,
- domain=bls_domain(spec.DOMAIN_DEPOSIT),
- )
- deposit_data.signature = signature
- return deposit_data
-
-
-def build_attestation_data(state, slot, shard):
- assert state.slot >= slot
-
- if slot == state.slot:
- block_root = build_empty_block_for_next_slot(state).parent_root
- else:
- block_root = get_block_root_at_slot(state, slot)
-
- current_epoch_start_slot = get_epoch_start_slot(get_current_epoch(state))
- if slot < current_epoch_start_slot:
- epoch_boundary_root = get_block_root(state, get_previous_epoch(state))
- elif slot == current_epoch_start_slot:
- epoch_boundary_root = block_root
- else:
- epoch_boundary_root = get_block_root(state, get_current_epoch(state))
-
- if slot < current_epoch_start_slot:
- justified_epoch = state.previous_justified_epoch
- justified_block_root = state.previous_justified_root
- else:
- justified_epoch = state.current_justified_epoch
- justified_block_root = state.current_justified_root
-
- crosslinks = state.current_crosslinks if slot_to_epoch(slot) == get_current_epoch(state) else state.previous_crosslinks
- parent_crosslink = crosslinks[shard]
- return AttestationData(
- beacon_block_root=block_root,
- source_epoch=justified_epoch,
- source_root=justified_block_root,
- target_epoch=slot_to_epoch(slot),
- target_root=epoch_boundary_root,
- crosslink=Crosslink(
- shard=shard,
- start_epoch=parent_crosslink.end_epoch,
- end_epoch=min(slot_to_epoch(slot), parent_crosslink.end_epoch + MAX_EPOCHS_PER_CROSSLINK),
- data_root=spec.ZERO_HASH,
- parent_root=hash_tree_root(parent_crosslink),
- ),
- )
-
-
-def build_voluntary_exit(state, epoch, validator_index, privkey):
- voluntary_exit = VoluntaryExit(
- epoch=epoch,
- validator_index=validator_index,
- )
- voluntary_exit.signature = bls.sign(
- message_hash=signing_root(voluntary_exit),
- privkey=privkey,
- domain=get_domain(
- state=state,
- domain_type=spec.DOMAIN_VOLUNTARY_EXIT,
- message_epoch=epoch,
- )
- )
-
- return voluntary_exit
-
-
-def build_deposit(state,
- deposit_data_leaves,
- pubkey,
- privkey,
- amount):
- deposit_data = build_deposit_data(state, pubkey, privkey, amount)
-
- item = deposit_data.hash_tree_root()
- index = len(deposit_data_leaves)
- deposit_data_leaves.append(item)
- tree = calc_merkle_tree_from_leaves(tuple(deposit_data_leaves))
- root = get_merkle_root((tuple(deposit_data_leaves)))
- proof = list(get_merkle_proof(tree, item_index=index))
- assert verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, index, root)
-
- deposit = Deposit(
- proof=list(proof),
- index=index,
- data=deposit_data,
- )
-
- return deposit, root, deposit_data_leaves
-
-
-def get_valid_proposer_slashing(state):
- current_epoch = get_current_epoch(state)
- validator_index = get_active_validator_indices(state, current_epoch)[-1]
- privkey = pubkey_to_privkey[state.validator_registry[validator_index].pubkey]
- slot = state.slot
-
- header_1 = BeaconBlockHeader(
- slot=slot,
- parent_root=ZERO_HASH,
- state_root=ZERO_HASH,
- body_root=ZERO_HASH,
- )
- header_2 = deepcopy(header_1)
- header_2.parent_root = b'\x02' * 32
- header_2.slot = slot + 1
-
- domain = get_domain(
- state=state,
- domain_type=spec.DOMAIN_BEACON_PROPOSER,
- )
- header_1.signature = bls.sign(
- message_hash=signing_root(header_1),
- privkey=privkey,
- domain=domain,
- )
- header_2.signature = bls.sign(
- message_hash=signing_root(header_2),
- privkey=privkey,
- domain=domain,
- )
-
- return ProposerSlashing(
- proposer_index=validator_index,
- header_1=header_1,
- header_2=header_2,
- )
-
-
-def get_valid_attester_slashing(state):
- attestation_1 = get_valid_attestation(state)
- attestation_2 = deepcopy(attestation_1)
- attestation_2.data.target_root = b'\x01' * 32
-
- return AttesterSlashing(
- attestation_1=convert_to_indexed(state, attestation_1),
- attestation_2=convert_to_indexed(state, attestation_2),
- )
-
-
-def get_valid_attestation(state, slot=None):
- if slot is None:
- slot = state.slot
-
- if slot_to_epoch(slot) == get_current_epoch(state):
- shard = (state.latest_start_shard + slot) % spec.SLOTS_PER_EPOCH
- else:
- previous_shard_delta = get_shard_delta(state, get_previous_epoch(state))
- shard = (state.latest_start_shard - previous_shard_delta + slot) % spec.SHARD_COUNT
-
- attestation_data = build_attestation_data(state, slot, shard)
-
- crosslink_committee = get_crosslink_committee(state, attestation_data.target_epoch, attestation_data.crosslink.shard)
-
- committee_size = len(crosslink_committee)
- bitfield_length = (committee_size + 7) // 8
- aggregation_bitfield = b'\xC0' + b'\x00' * (bitfield_length - 1)
- custody_bitfield = b'\x00' * bitfield_length
- attestation = Attestation(
- aggregation_bitfield=aggregation_bitfield,
- data=attestation_data,
- custody_bitfield=custody_bitfield,
- )
- participants = get_attesting_indices(
- state,
- attestation.data,
- attestation.aggregation_bitfield,
- )
- assert len(participants) == 2
-
- signatures = []
- for validator_index in participants:
- privkey = privkeys[validator_index]
- signatures.append(
- get_attestation_signature(
- state,
- attestation.data,
- privkey
- )
- )
-
- attestation.aggregation_signature = bls.aggregate_signatures(signatures)
- return attestation
-
-
-def get_valid_transfer(state, slot=None, sender_index=None, amount=None, fee=None):
- if slot is None:
- slot = state.slot
- current_epoch = get_current_epoch(state)
- if sender_index is None:
- sender_index = get_active_validator_indices(state, current_epoch)[-1]
- recipient_index = get_active_validator_indices(state, current_epoch)[0]
- transfer_pubkey = pubkeys[-1]
- transfer_privkey = privkeys[-1]
-
- if fee is None:
- fee = get_balance(state, sender_index) // 32
- if amount is None:
- amount = get_balance(state, sender_index) - fee
-
- transfer = Transfer(
- sender=sender_index,
- recipient=recipient_index,
- amount=amount,
- fee=fee,
- slot=slot,
- pubkey=transfer_pubkey,
- signature=ZERO_HASH,
- )
- transfer.signature = bls.sign(
- message_hash=signing_root(transfer),
- privkey=transfer_privkey,
- domain=get_domain(
- state=state,
- domain_type=spec.DOMAIN_TRANSFER,
- message_epoch=get_current_epoch(state),
- )
- )
-
- # ensure withdrawal_credentials reproducable
- state.validator_registry[transfer.sender].withdrawal_credentials = (
- spec.BLS_WITHDRAWAL_PREFIX_BYTE + spec.hash(transfer.pubkey)[1:]
- )
-
- return transfer
-
-
-def get_attestation_signature(state, attestation_data, privkey, custody_bit=0b0):
- message_hash = AttestationDataAndCustodyBit(
- data=attestation_data,
- custody_bit=custody_bit,
- ).hash_tree_root()
-
- return bls.sign(
- message_hash=message_hash,
- privkey=privkey,
- domain=get_domain(
- state=state,
- domain_type=spec.DOMAIN_ATTESTATION,
- message_epoch=attestation_data.target_epoch,
- )
- )
-
-
-def fill_aggregate_attestation(state, attestation):
- crosslink_committee = get_crosslink_committee(state, attestation.data.target_epoch, attestation.data.crosslink.shard)
- for i in range(len(crosslink_committee)):
- attestation.aggregation_bitfield = set_bitfield_bit(attestation.aggregation_bitfield, i)
-
-
-def add_attestation_to_state(state, attestation, slot):
- block = build_empty_block_for_next_slot(state)
- block.slot = slot
- block.body.attestations.append(attestation)
- state_transition(state, block)
-
-
-def next_slot(state):
- """
- Transition to the next slot via an empty block.
- Return the empty block that triggered the transition.
- """
- block = build_empty_block_for_next_slot(state)
- state_transition(state, block)
- return block
-
-
-def next_epoch(state):
- """
- Transition to the start slot of the next epoch via an empty block.
- Return the empty block that triggered the transition.
- """
- block = build_empty_block_for_next_slot(state)
- block.slot += spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH)
- state_transition(state, block)
- return block
-
-
-def get_state_root(state, slot) -> bytes:
- """
- Return the state root at a recent ``slot``.
- """
- assert slot < state.slot <= slot + spec.SLOTS_PER_HISTORICAL_ROOT
- return state.latest_state_roots[slot % spec.SLOTS_PER_HISTORICAL_ROOT]
diff --git a/test_libs/pyspec/tests/test_sanity.py b/test_libs/pyspec/tests/test_sanity.py
deleted file mode 100644
index 1c05e6b53a..0000000000
--- a/test_libs/pyspec/tests/test_sanity.py
+++ /dev/null
@@ -1,436 +0,0 @@
-from copy import deepcopy
-
-import pytest
-
-from py_ecc import bls
-import eth2spec.phase0.spec as spec
-
-from eth2spec.utils.minimal_ssz import signing_root
-from eth2spec.phase0.spec import (
- # constants
- ZERO_HASH,
- SLOTS_PER_HISTORICAL_ROOT,
- # SSZ
- Deposit,
- Transfer,
- VoluntaryExit,
- # functions
- get_active_validator_indices,
- get_beacon_proposer_index,
- get_block_root_at_slot,
- get_current_epoch,
- get_domain,
- process_slot,
- verify_merkle_branch,
- state_transition,
- hash,
-)
-from eth2spec.utils.merkle_minimal import (
- calc_merkle_tree_from_leaves,
- get_merkle_proof,
- get_merkle_root,
-)
-from .helpers import (
- advance_slot,
- get_balance,
- build_deposit_data,
- build_empty_block_for_next_slot,
- fill_aggregate_attestation,
- get_state_root,
- get_valid_attestation,
- get_valid_attester_slashing,
- get_valid_proposer_slashing,
- next_slot,
- privkeys,
- pubkeys,
-)
-
-
-# mark entire file as 'sanity'
-pytestmark = pytest.mark.sanity
-
-
-def test_slot_transition(state):
- test_state = deepcopy(state)
- process_slot(test_state)
- advance_slot(test_state)
- assert test_state.slot == state.slot + 1
- assert get_state_root(test_state, state.slot) == state.hash_tree_root()
- return test_state
-
-
-def test_empty_block_transition(state):
- test_state = deepcopy(state)
-
- block = build_empty_block_for_next_slot(test_state)
- state_transition(test_state, block)
-
- assert len(test_state.eth1_data_votes) == len(state.eth1_data_votes) + 1
- assert get_block_root_at_slot(test_state, state.slot) == block.parent_root
-
- return state, [block], test_state
-
-
-def test_skipped_slots(state):
- test_state = deepcopy(state)
- block = build_empty_block_for_next_slot(test_state)
- block.slot += 3
-
- state_transition(test_state, block)
-
- assert test_state.slot == block.slot
- for slot in range(state.slot, test_state.slot):
- assert get_block_root_at_slot(test_state, slot) == block.parent_root
-
- return state, [block], test_state
-
-
-def test_empty_epoch_transition(state):
- test_state = deepcopy(state)
- block = build_empty_block_for_next_slot(test_state)
- block.slot += spec.SLOTS_PER_EPOCH
-
- state_transition(test_state, block)
-
- assert test_state.slot == block.slot
- for slot in range(state.slot, test_state.slot):
- assert get_block_root_at_slot(test_state, slot) == block.parent_root
-
- return state, [block], test_state
-
-
-def test_empty_epoch_transition_not_finalizing(state):
- test_state = deepcopy(state)
- block = build_empty_block_for_next_slot(test_state)
- block.slot += spec.SLOTS_PER_EPOCH * 5
-
- state_transition(test_state, block)
-
- assert test_state.slot == block.slot
- assert test_state.finalized_epoch < get_current_epoch(test_state) - 4
- for index in range(len(test_state.validator_registry)):
- assert get_balance(test_state, index) < get_balance(state, index)
-
- return state, [block], test_state
-
-
-def test_proposer_slashing(state):
- test_state = deepcopy(state)
- proposer_slashing = get_valid_proposer_slashing(state)
- validator_index = proposer_slashing.proposer_index
-
- #
- # Add to state via block transition
- #
- block = build_empty_block_for_next_slot(test_state)
- block.body.proposer_slashings.append(proposer_slashing)
- state_transition(test_state, block)
-
- assert not state.validator_registry[validator_index].slashed
-
- slashed_validator = test_state.validator_registry[validator_index]
- assert slashed_validator.slashed
- assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
- assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
- # lost whistleblower reward
- assert get_balance(test_state, validator_index) < get_balance(state, validator_index)
-
- return state, [block], test_state
-
-
-def test_attester_slashing(state):
- test_state = deepcopy(state)
- attester_slashing = get_valid_attester_slashing(state)
- validator_index = attester_slashing.attestation_1.custody_bit_0_indices[0]
-
- #
- # Add to state via block transition
- #
- block = build_empty_block_for_next_slot(test_state)
- block.body.attester_slashings.append(attester_slashing)
- state_transition(test_state, block)
-
- assert not state.validator_registry[validator_index].slashed
-
- slashed_validator = test_state.validator_registry[validator_index]
- assert slashed_validator.slashed
- assert slashed_validator.exit_epoch < spec.FAR_FUTURE_EPOCH
- assert slashed_validator.withdrawable_epoch < spec.FAR_FUTURE_EPOCH
- # lost whistleblower reward
- assert get_balance(test_state, validator_index) < get_balance(state, validator_index)
-
- proposer_index = get_beacon_proposer_index(test_state)
- # gained whistleblower reward
- assert (
- get_balance(test_state, proposer_index) >
- get_balance(state, proposer_index)
- )
-
- return state, [block], test_state
-
-
-def test_deposit_in_block(state):
- pre_state = deepcopy(state)
- test_deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry)
-
- index = len(test_deposit_data_leaves)
- pubkey = pubkeys[index]
- privkey = privkeys[index]
- deposit_data = build_deposit_data(pre_state, pubkey, privkey, spec.MAX_EFFECTIVE_BALANCE)
-
- item = deposit_data.hash_tree_root()
- test_deposit_data_leaves.append(item)
- tree = calc_merkle_tree_from_leaves(tuple(test_deposit_data_leaves))
- root = get_merkle_root((tuple(test_deposit_data_leaves)))
- proof = list(get_merkle_proof(tree, item_index=index))
- assert verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, index, root)
-
- deposit = Deposit(
- proof=list(proof),
- index=index,
- data=deposit_data,
- )
-
- pre_state.latest_eth1_data.deposit_root = root
- pre_state.latest_eth1_data.deposit_count = len(test_deposit_data_leaves)
- post_state = deepcopy(pre_state)
- block = build_empty_block_for_next_slot(post_state)
- block.body.deposits.append(deposit)
-
- state_transition(post_state, block)
- assert len(post_state.validator_registry) == len(state.validator_registry) + 1
- assert len(post_state.balances) == len(state.balances) + 1
- assert get_balance(post_state, index) == spec.MAX_EFFECTIVE_BALANCE
- assert post_state.validator_registry[index].pubkey == pubkeys[index]
-
- return pre_state, [block], post_state
-
-
-def test_deposit_top_up(state):
- pre_state = deepcopy(state)
- test_deposit_data_leaves = [ZERO_HASH] * len(pre_state.validator_registry)
-
- validator_index = 0
- amount = spec.MAX_EFFECTIVE_BALANCE // 4
- pubkey = pubkeys[validator_index]
- privkey = privkeys[validator_index]
- deposit_data = build_deposit_data(pre_state, pubkey, privkey, amount)
-
- merkle_index = len(test_deposit_data_leaves)
- item = deposit_data.hash_tree_root()
- test_deposit_data_leaves.append(item)
- tree = calc_merkle_tree_from_leaves(tuple(test_deposit_data_leaves))
- root = get_merkle_root((tuple(test_deposit_data_leaves)))
- proof = list(get_merkle_proof(tree, item_index=merkle_index))
- assert verify_merkle_branch(item, proof, spec.DEPOSIT_CONTRACT_TREE_DEPTH, merkle_index, root)
-
- deposit = Deposit(
- proof=list(proof),
- index=merkle_index,
- data=deposit_data,
- )
-
- pre_state.latest_eth1_data.deposit_root = root
- pre_state.latest_eth1_data.deposit_count = len(test_deposit_data_leaves)
- block = build_empty_block_for_next_slot(pre_state)
- block.body.deposits.append(deposit)
-
- pre_balance = get_balance(pre_state, validator_index)
- post_state = deepcopy(pre_state)
- state_transition(post_state, block)
- assert len(post_state.validator_registry) == len(pre_state.validator_registry)
- assert len(post_state.balances) == len(pre_state.balances)
- assert get_balance(post_state, validator_index) == pre_balance + amount
-
- return pre_state, [block], post_state
-
-
-def test_attestation(state):
- state.slot = spec.SLOTS_PER_EPOCH
- test_state = deepcopy(state)
- attestation = get_valid_attestation(state)
-
- #
- # Add to state via block transition
- #
- attestation_block = build_empty_block_for_next_slot(test_state)
- attestation_block.slot += spec.MIN_ATTESTATION_INCLUSION_DELAY
- attestation_block.body.attestations.append(attestation)
- state_transition(test_state, attestation_block)
-
- assert len(test_state.current_epoch_attestations) == len(state.current_epoch_attestations) + 1
-
-
- #
- # Epoch transition should move to previous_epoch_attestations
- #
- pre_current_epoch_attestations = deepcopy(test_state.current_epoch_attestations)
-
- epoch_block = build_empty_block_for_next_slot(test_state)
- epoch_block.slot += spec.SLOTS_PER_EPOCH
- state_transition(test_state, epoch_block)
-
- assert len(test_state.current_epoch_attestations) == 0
- assert test_state.previous_epoch_attestations == pre_current_epoch_attestations
-
- return state, [attestation_block, epoch_block], test_state
-
-
-def test_voluntary_exit(state):
- pre_state = deepcopy(state)
- validator_index = get_active_validator_indices(
- pre_state,
- get_current_epoch(pre_state)
- )[-1]
-
- # move state forward PERSISTENT_COMMITTEE_PERIOD epochs to allow for exit
- pre_state.slot += spec.PERSISTENT_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH
-
- post_state = deepcopy(pre_state)
-
- voluntary_exit = VoluntaryExit(
- epoch=get_current_epoch(pre_state),
- validator_index=validator_index,
- )
- voluntary_exit.signature = bls.sign(
- message_hash=signing_root(voluntary_exit),
- privkey=privkeys[validator_index],
- domain=get_domain(
- state=pre_state,
- domain_type=spec.DOMAIN_VOLUNTARY_EXIT,
- )
- )
-
- #
- # Add to state via block transition
- #
- initiate_exit_block = build_empty_block_for_next_slot(post_state)
- initiate_exit_block.body.voluntary_exits.append(voluntary_exit)
- state_transition(post_state, initiate_exit_block)
-
- assert post_state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
-
- #
- # Process within epoch transition
- #
- exit_block = build_empty_block_for_next_slot(post_state)
- exit_block.slot += spec.SLOTS_PER_EPOCH
- state_transition(post_state, exit_block)
-
- assert post_state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
-
- return pre_state, [initiate_exit_block, exit_block], post_state
-
-
-def test_transfer(state):
- # overwrite default 0 to test
- spec.MAX_TRANSFERS = 1
-
- pre_state = deepcopy(state)
- current_epoch = get_current_epoch(pre_state)
- sender_index = get_active_validator_indices(pre_state, current_epoch)[-1]
- recipient_index = get_active_validator_indices(pre_state, current_epoch)[0]
- transfer_pubkey = pubkeys[-1]
- transfer_privkey = privkeys[-1]
- amount = get_balance(pre_state, sender_index)
- pre_transfer_recipient_balance = get_balance(pre_state, recipient_index)
- transfer = Transfer(
- sender=sender_index,
- recipient=recipient_index,
- amount=amount,
- fee=0,
- slot=pre_state.slot + 1,
- pubkey=transfer_pubkey,
- )
- transfer.signature = bls.sign(
- message_hash=signing_root(transfer),
- privkey=transfer_privkey,
- domain=get_domain(
- state=pre_state,
- domain_type=spec.DOMAIN_TRANSFER,
- )
- )
-
- # ensure withdrawal_credentials reproducable
- pre_state.validator_registry[sender_index].withdrawal_credentials = (
- spec.BLS_WITHDRAWAL_PREFIX_BYTE + hash(transfer_pubkey)[1:]
- )
- # un-activate so validator can transfer
- pre_state.validator_registry[sender_index].activation_eligibility_epoch = spec.FAR_FUTURE_EPOCH
-
- post_state = deepcopy(pre_state)
- #
- # Add to state via block transition
- #
- block = build_empty_block_for_next_slot(post_state)
- block.body.transfers.append(transfer)
- state_transition(post_state, block)
-
- sender_balance = get_balance(post_state, sender_index)
- recipient_balance = get_balance(post_state, recipient_index)
- assert sender_balance == 0
- assert recipient_balance == pre_transfer_recipient_balance + amount
-
- return pre_state, [block], post_state
-
-
-def test_balance_driven_status_transitions(state):
- current_epoch = get_current_epoch(state)
- validator_index = get_active_validator_indices(state, current_epoch)[-1]
-
- assert state.validator_registry[validator_index].exit_epoch == spec.FAR_FUTURE_EPOCH
-
- # set validator balance to below ejection threshold
- state.validator_registry[validator_index].effective_balance = spec.EJECTION_BALANCE
-
- post_state = deepcopy(state)
- #
- # trigger epoch transition
- #
- block = build_empty_block_for_next_slot(post_state)
- block.slot += spec.SLOTS_PER_EPOCH
- state_transition(post_state, block)
-
- assert post_state.validator_registry[validator_index].exit_epoch < spec.FAR_FUTURE_EPOCH
-
- return state, [block], post_state
-
-
-def test_historical_batch(state):
- state.slot += spec.SLOTS_PER_HISTORICAL_ROOT - (state.slot % spec.SLOTS_PER_HISTORICAL_ROOT) - 1
-
- post_state = deepcopy(state)
-
- block = build_empty_block_for_next_slot(post_state)
-
- state_transition(post_state, block)
-
- assert post_state.slot == block.slot
- assert get_current_epoch(post_state) % (spec.SLOTS_PER_HISTORICAL_ROOT // spec.SLOTS_PER_EPOCH) == 0
- assert len(post_state.historical_roots) == len(state.historical_roots) + 1
-
- return state, [block], post_state
-
-
-def test_eth1_data_votes(state):
- post_state = deepcopy(state)
-
- expected_votes = 0
- assert len(state.eth1_data_votes) == expected_votes
-
- blocks = []
- for _ in range(spec.SLOTS_PER_ETH1_VOTING_PERIOD - 1):
- block = build_empty_block_for_next_slot(post_state)
- state_transition(post_state, block)
- expected_votes += 1
- assert len(post_state.eth1_data_votes) == expected_votes
- blocks.append(block)
-
- block = build_empty_block_for_next_slot(post_state)
- state_transition(post_state, block)
- blocks.append(block)
-
- assert post_state.slot % spec.SLOTS_PER_ETH1_VOTING_PERIOD == 0
- assert len(post_state.eth1_data_votes) == 1
-
- return state, blocks, post_state
| BLS and testing
Decided I wanted to get this out to explain the current state of testing, and **collect feedback** (implementers please comment) on what you need from testing, and your feelings about BLS usage in tests.
# BLS and testing
The two pain-points to get a pretty (and large) set of test-vectors out for clients are:
- BLS Signature creation
- BLS Signature verification
And a side issue, but easily resolved:
*efficient creation of a genesis state*:
creating a genesis state becomes expensive once BLS functionality is implemented in test code (creation of signed deposits, and their verification).
The solution would be to either cache it, or create it directly without going through the spec functions (the current temporary solution on the experiment branch).
## Status
Talking about the status on [`spectest-deco` PR 1052](https://github.com/ethereum/eth2.0-specs/pull/1052) here, based on the `v06x` branch, where we are developing 0.6 improvements. (to be merged back into dev later)
### The testing pipeline currently looks like:
- py-spec, calls BLS stub
- test-helpers, don't create self-signed objects with valid signatures
- py-test code, unified with test-vector-creation (see [PR 1052](https://github.com/ethereum/eth2.0-specs/pull/1052))
- py-test runner to run spec-tests, purely for assertions
- test-generator running the spec-tests, passing `generator_mode=true` to each of them, making them output a test-vector.
### Pytests status:
- moved from `tests/` to `eth2spec/test`, i.e. now part of the package
- removed use of `pytest`
- annotated with `@spec_test` or similar (see PR 1052)
- as part of test-generation effort, yay for shared effort:
- expanded in block-operation testing: [coverage checklist here](https://github.com/ethereum/eth2.0-specs/issues/927)
- slightly faster, fewer deep-copies
- stuck on BLS stub (no sig creation/verification)
### Test-generation status:
- BLS, SSZ-generic, SSZ-static, shuffling test generators still all in place and up to date (`v06x` branch)
- `operations` test-gen uses test-package ability to output test-vectors for each test-case
- but no valid signatures
- lack of a definition of how to handle this signature problem as a test-consumer
- there are no signature-related testcases
- turning BLS off would effectively let you check conformance, but it's hacky, and not remotely a good practice to have even an option for...
- it's approx. ~140MB worth (iirc) of yaml-encoded state-transitions, covering many edge cases. Worth getting into the hands of implementers quickly.
- `sanity` tests updated and can be cleanly used for test-generation, but more work is required to define the format of the test-vectors, as there is more variety.
- `epoch` processing tests also updated, also can be used, not as complete as block-processing, lower priority.
## Possible ways forward:
- Simple but hacky: "turn BLS off for testing"
- No "BLS off": BLS stays ON on the client side, but only partially on the spec side. Rely on signature verification not being hit before anything else during testing
- valid test cases generated with valid signatures
- invalid test cases are marked: does the case error because of BLS? Runners should then check the reason for aborting processing: if it doesn't match, the test should fail. With this, these pytests don't need the full BLS update work, and can be released somewhat quicker
- "BLS on", more work (~1 week)
- slower on test-generation, but we get the best kind of test-vectors: correct, BLS verification ON.
- blocker: what if a test case fails because of a signature error (the test setup not creating the sig correctly) instead of a real assertion case? The spec will look correct and pass the tests, but things are not right. We need to mark sig-verification errors distinctly, so we can catch these problems when we turn BLS on in the pyspec. How: instead of `assert verify_...`, just `verify_...`, and make it raise a special `BLSVerificationError` (or something like that); a short sketch follows this list
- We likely still want to mark tests as "signature related" or not, so implementers can easily catch it if their code is not aborting properly before signature verification, to ensure invalid inputs are not costly.
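A minimal sketch of the `BLSVerificationError` idea from the blocker point above (the helper name is illustrative, and the `py_ecc`-style `bls.verify` call mirrors what the test helpers already use; it is not the spec's actual API):
```python
from py_ecc import bls


class BLSVerificationError(Exception):
    """Raised when a BLS signature check fails, so tooling can distinguish
    signature problems from ordinary spec assertion failures."""


def bls_verify_or_raise(pubkey, message_hash, signature, domain):
    # Illustrative replacement for `assert bls.verify(...)` inside spec code:
    # a failed check raises a dedicated error instead of a bare AssertionError.
    if not bls.verify(message_hash=message_hash, pubkey=pubkey,
                      signature=signature, domain=domain):
        raise BLSVerificationError(
            "BLS signature verification failed for message %s" % message_hash.hex())
```
A test runner can then catch `BLSVerificationError` separately, which is exactly the distinct marking of signature-related failures discussed above.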
A work-in-progress introduction of actual full BLS usage in the pytests is started here: [`tests-with-sigs` branch](https://github.com/ethereum/eth2.0-specs/tree/tests-with-sigs)
Suggestions welcome.
|
googleapis__python-spanner-django-124 | [
{
"content": "from datetime import datetime\n\nfrom django.conf import settings\nfrom django.db.backends.base.operations import BaseDatabaseOperations\nfrom django.utils import timezone\nfrom spanner.dbapi.parse_utils import TimestampStr\n\n\nclass DatabaseOperations(BaseDatabaseOperations):\n # Django's lookup names that require a different name in Spanner's\n # EXTRACT() function.\n # https://cloud.google.com/spanner/docs/functions-and-operators#extract\n extract_names = {\n 'week_day': 'dayofweek',\n 'iso_week': 'isoweek',\n 'iso_year': 'isoyear',\n }\n\n def quote_name(self, name):\n if '-' in name:\n return '`' + name + '`'\n return name\n\n def bulk_insert_sql(self, fields, placeholder_rows):\n placeholder_rows_sql = (\", \".join(row) for row in placeholder_rows)\n values_sql = \", \".join(\"(%s)\" % sql for sql in placeholder_rows_sql)\n return \"VALUES \" + values_sql\n\n def sql_flush(self, style, tables, sequences, allow_cascade=False):\n # Cloud Spanner doesn't support TRUNCATE so DELETE instead.\n # A dummy WHERE clause is required.\n if tables:\n delete_sql = '%s %s %%s;' % (\n style.SQL_KEYWORD('DELETE'),\n style.SQL_KEYWORD('FROM'),\n )\n return [\n delete_sql % style.SQL_FIELD(self.quote_name(table))\n for table in tables\n ]\n else:\n return []\n\n def adapt_datetimefield_value(self, value):\n if value is None:\n return None\n # Expression values are adapted by the database.\n if hasattr(value, 'resolve_expression'):\n return value\n # Cloud Spanner doesn't support tz-aware datetimes\n if timezone.is_aware(value):\n if settings.USE_TZ:\n value = timezone.make_naive(value, self.connection.timezone)\n else:\n raise ValueError(\"Cloud Spanner does not support timezone-aware datetimes when USE_TZ is False.\")\n return TimestampStr(value.isoformat(timespec='microseconds') + 'Z')\n\n def get_db_converters(self, expression):\n converters = super().get_db_converters(expression)\n internal_type = expression.output_field.get_internal_type()\n if internal_type == 'DateTimeField':\n converters.append(self.convert_datetimefield_value)\n return converters\n\n def convert_datetimefield_value(self, value, expression, connection):\n if value is None:\n return value\n # Cloud Spanner returns the\n # google.api_core.datetime_helpers.DatetimeWithNanoseconds subclass\n # of datetime with tzinfo=UTC (which should be replaced with the\n # connection's timezone). Django doesn't support nanoseconds so that\n # part is ignored.\n return datetime(\n value.year, value.month, value.day,\n value.hour, value.minute, value.second, value.microsecond,\n self.connection.timezone,\n )\n\n def date_extract_sql(self, lookup_type, field_name):\n lookup_type = self.extract_names.get(lookup_type, lookup_type)\n return 'EXTRACT(%s FROM %s)' % (lookup_type, field_name)\n\n def datetime_extract_sql(self, lookup_type, field_name, tzname):\n tzname = self.connection.timezone if settings.USE_TZ else 'UTC'\n lookup_type = self.extract_names.get(lookup_type, lookup_type)\n return 'EXTRACT(%s FROM %s AT TIME ZONE \"%s\")' % (lookup_type, field_name, tzname)\n",
"path": "spanner/django/operations.py"
}
] | [
{
"content": "from datetime import datetime\n\nfrom django.conf import settings\nfrom django.db.backends.base.operations import BaseDatabaseOperations\nfrom django.utils import timezone\nfrom spanner.dbapi.parse_utils import TimestampStr\n\n\nclass DatabaseOperations(BaseDatabaseOperations):\n # Django's lookup names that require a different name in Spanner's\n # EXTRACT() function.\n # https://cloud.google.com/spanner/docs/functions-and-operators#extract\n extract_names = {\n 'week_day': 'dayofweek',\n 'iso_week': 'isoweek',\n 'iso_year': 'isoyear',\n }\n\n def quote_name(self, name):\n if '-' in name:\n return '`' + name + '`'\n return name\n\n def bulk_insert_sql(self, fields, placeholder_rows):\n placeholder_rows_sql = (\", \".join(row) for row in placeholder_rows)\n values_sql = \", \".join(\"(%s)\" % sql for sql in placeholder_rows_sql)\n return \"VALUES \" + values_sql\n\n def sql_flush(self, style, tables, sequences, allow_cascade=False):\n # Cloud Spanner doesn't support TRUNCATE so DELETE instead.\n # A dummy WHERE clause is required.\n if tables:\n delete_sql = '%s %s %%s' % (\n style.SQL_KEYWORD('DELETE'),\n style.SQL_KEYWORD('FROM'),\n )\n return [\n delete_sql % style.SQL_FIELD(self.quote_name(table))\n for table in tables\n ]\n else:\n return []\n\n def adapt_datetimefield_value(self, value):\n if value is None:\n return None\n # Expression values are adapted by the database.\n if hasattr(value, 'resolve_expression'):\n return value\n # Cloud Spanner doesn't support tz-aware datetimes\n if timezone.is_aware(value):\n if settings.USE_TZ:\n value = timezone.make_naive(value, self.connection.timezone)\n else:\n raise ValueError(\"Cloud Spanner does not support timezone-aware datetimes when USE_TZ is False.\")\n return TimestampStr(value.isoformat(timespec='microseconds') + 'Z')\n\n def get_db_converters(self, expression):\n converters = super().get_db_converters(expression)\n internal_type = expression.output_field.get_internal_type()\n if internal_type == 'DateTimeField':\n converters.append(self.convert_datetimefield_value)\n return converters\n\n def convert_datetimefield_value(self, value, expression, connection):\n if value is None:\n return value\n # Cloud Spanner returns the\n # google.api_core.datetime_helpers.DatetimeWithNanoseconds subclass\n # of datetime with tzinfo=UTC (which should be replaced with the\n # connection's timezone). Django doesn't support nanoseconds so that\n # part is ignored.\n return datetime(\n value.year, value.month, value.day,\n value.hour, value.minute, value.second, value.microsecond,\n self.connection.timezone,\n )\n\n def date_extract_sql(self, lookup_type, field_name):\n lookup_type = self.extract_names.get(lookup_type, lookup_type)\n return 'EXTRACT(%s FROM %s)' % (lookup_type, field_name)\n\n def datetime_extract_sql(self, lookup_type, field_name, tzname):\n tzname = self.connection.timezone if settings.USE_TZ else 'UTC'\n lookup_type = self.extract_names.get(lookup_type, lookup_type)\n return 'EXTRACT(%s FROM %s AT TIME ZONE \"%s\")' % (lookup_type, field_name, tzname)\n",
"path": "spanner/django/operations.py"
}
] | diff --git a/spanner/django/operations.py b/spanner/django/operations.py
index 96bae93c5d..e2790b0169 100644
--- a/spanner/django/operations.py
+++ b/spanner/django/operations.py
@@ -30,7 +30,7 @@ def sql_flush(self, style, tables, sequences, allow_cascade=False):
# Cloud Spanner doesn't support TRUNCATE so DELETE instead.
# A dummy WHERE clause is required.
if tables:
- delete_sql = '%s %s %%s;' % (
+ delete_sql = '%s %s %%s' % (
style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
)
| dbapi: properly parse and ensure WHERE clause
PR #111's task was to add a WHERE clause to statements that lack one. However, the code there assumes a single statement that is not terminated by the SQL terminator `;`, and we blindly append ` WHERE 1=1` to such statements, e.g. given
```sql
DELETE FROM basic_article;
```
we make it
```sql
DELETE FROM basic_article; WHERE 1=1
```
but really we should be making it
```sql
DELETE FROM basic_article WHERE 1=1;
```
by parsing out tokens of all the statements and affixing ` WHERE 1=1` per statement.
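A rough sketch of that per-statement approach (`ensure_where` is a hypothetical helper and `sqlparse` is just one convenient way to split statements; this is not the dbapi's actual implementation):
```python
import re

import sqlparse  # third-party statement splitter, used here only for illustration


def ensure_where(sql):
    """Append ' WHERE 1=1' to each DELETE/UPDATE statement that lacks a
    WHERE clause, keeping any terminating ';' in place."""
    fixed = []
    for statement in sqlparse.split(sql):
        body = statement.rstrip()
        terminated = body.endswith(';')
        if terminated:
            body = body[:-1].rstrip()
        needs_where = (re.match(r'\s*(delete|update)\b', body, re.IGNORECASE)
                       and not re.search(r'\bwhere\b', body, re.IGNORECASE))
        if needs_where:
            body += ' WHERE 1=1'
        fixed.append(body + (';' if terminated else ''))
    return ' '.join(fixed)


assert ensure_where('DELETE FROM basic_article;') == 'DELETE FROM basic_article WHERE 1=1;'
```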
|
tensorflow__tfx-2189 | [
{
"content": "# Lint as: python2, python3\n# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Package Setup script for TFX.\"\"\"\n\nfrom __future__ import print_function\n\nimport os\nimport subprocess\n\nimport setuptools\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.command import develop\n# pylint: disable=g-bad-import-order\n# It is recommended to import setuptools prior to importing distutils to avoid\n# using legacy behavior from distutils.\n# https://setuptools.readthedocs.io/en/latest/history.html#v48-0-0\nfrom distutils import spawn\nfrom distutils.command import build\n# pylint: enable=g-bad-import-order\n\nfrom tfx import dependencies\nfrom tfx import version\nfrom tfx.tools import resolve_deps\n\n\nclass _BuildCommand(build.build):\n \"\"\"Build everything that is needed to install.\n\n This overrides the original distutils \"build\" command to to run gen_proto\n command before any sub_commands.\n\n build command is also invoked from bdist_wheel and install command, therefore\n this implementation covers the following commands:\n - pip install . (which invokes bdist_wheel)\n - python setup.py install (which invokes install command)\n - python setup.py bdist_wheel (which invokes bdist_wheel command)\n \"\"\"\n\n def _should_generate_proto(self):\n \"\"\"Predicate method for running GenProto command or not.\"\"\"\n return True\n\n # Add \"gen_proto\" command as the first sub_command of \"build\". Each\n # sub_command of \"build\" (e.g. \"build_py\", \"build_ext\", etc.) is executed\n # sequentially when running a \"build\" command, if the second item in the tuple\n # (predicate method) is evaluated to true.\n sub_commands = [\n ('gen_proto', _should_generate_proto),\n ] + build.build.sub_commands\n\n\nclass _DevelopCommand(develop.develop):\n \"\"\"Developmental install.\n\n https://setuptools.readthedocs.io/en/latest/setuptools.html#development-mode\n Unlike normal package installation where distribution is copied to the\n site-packages folder, developmental install creates a symbolic link to the\n source code directory, so that your local code change is immediately visible\n in runtime without re-installation.\n\n This is a setuptools-only (i.e. not included in distutils) command that is\n also used in pip's editable install (pip install -e). Originally it only\n invokes build_py and install_lib command, but we override it to run gen_proto\n command in advance.\n\n This implementation covers the following commands:\n - pip install -e . (developmental install)\n - python setup.py develop (which is invoked from developmental install)\n \"\"\"\n\n def run(self):\n self.run_command('gen_proto')\n # Run super().initialize_options. 
Command is an old-style class (i.e.\n # doesn't inherit object) and super() fails in python 2.\n develop.develop.run(self)\n\n\nclass _GenProtoCommand(setuptools.Command):\n \"\"\"Generate proto stub files in python.\n\n Running this command will populate foo_pb2.py file next to your foo.proto\n file.\n \"\"\"\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n self._bazel_cmd = spawn.find_executable('bazel')\n if not self._bazel_cmd:\n raise RuntimeError(\n 'Could not find \"bazel\" binary. Please visit '\n 'https://docs.bazel.build/versions/master/install.html for '\n 'installation instruction.')\n\n def run(self):\n subprocess.check_call(\n [self._bazel_cmd, 'run', '//tfx/build:gen_proto'],\n # Bazel should be invoked in a directory containing bazel WORKSPACE\n # file, which is the root directory.\n cwd=os.path.dirname(os.path.realpath(__file__)),)\n\n\n# Get the long description from the README file.\nwith open('README.md') as fp:\n _LONG_DESCRIPTION = fp.read()\n\n\nsetup(\n name='tfx',\n version=version.__version__,\n author='Google LLC',\n author_email='[email protected]',\n license='Apache 2.0',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n namespace_packages=[],\n install_requires=dependencies.make_required_install_packages(),\n extras_require={\n # In order to use 'docker-image' or 'all', system libraries specified\n # under 'tfx/tools/docker/Dockerfile' are required\n 'docker-image': dependencies.make_extra_packages_docker_image(),\n 'tfjs': dependencies.make_extra_packages_tfjs(),\n 'all': dependencies.make_all_dependency_packages(),\n },\n # TODO(b/158761800): Move to [build-system] requires in pyproject.toml.\n setup_requires=[\n 'pytest-runner',\n 'poetry==1.0.9', # Required for ResolveDeps command.\n # Poetry API is not officially documented and subject\n # to change in the future. Thus fix the version.\n 'clikit>=0.4.3,<0.5', # Required for ResolveDeps command.\n ],\n cmdclass={\n 'build': _BuildCommand,\n 'develop': _DevelopCommand,\n 'gen_proto': _GenProtoCommand,\n 'resolve_deps': resolve_deps.ResolveDepsCommand,\n },\n python_requires='>=3.5,<4',\n packages=find_packages(),\n include_package_data=True,\n description='TensorFlow Extended (TFX) is a TensorFlow-based general-purpose machine learning platform implemented at Google',\n long_description=_LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n keywords='tensorflow tfx',\n url='https://www.tensorflow.org/tfx',\n download_url='https://github.com/tensorflow/tfx/tags',\n requires=[],\n # Below console_scripts, each line identifies one console script. 
The first\n # part before the equals sign (=) which is 'tfx', is the name of the script\n # that should be generated, the second part is the import path followed by a\n # colon (:) with the Click command group. After installation, the user can\n # invoke the CLI using \"tfx <command_group> <sub_command> <flags>\"\n entry_points=\"\"\"\n [console_scripts]\n tfx=tfx.tools.cli.cli_main:cli_group\n \"\"\")\n",
"path": "setup.py"
}
] | [
{
"content": "# Lint as: python2, python3\n# Copyright 2019 Google LLC. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Package Setup script for TFX.\"\"\"\n\nfrom __future__ import print_function\n\nimport os\nimport subprocess\n\nimport setuptools\nfrom setuptools import find_packages\nfrom setuptools import setup\nfrom setuptools.command import develop\n# pylint: disable=g-bad-import-order\n# It is recommended to import setuptools prior to importing distutils to avoid\n# using legacy behavior from distutils.\n# https://setuptools.readthedocs.io/en/latest/history.html#v48-0-0\nfrom distutils import spawn\nfrom distutils.command import build\n# pylint: enable=g-bad-import-order\n\nfrom tfx import dependencies\nfrom tfx import version\nfrom tfx.tools import resolve_deps\n\n\nclass _BuildCommand(build.build):\n \"\"\"Build everything that is needed to install.\n\n This overrides the original distutils \"build\" command to to run gen_proto\n command before any sub_commands.\n\n build command is also invoked from bdist_wheel and install command, therefore\n this implementation covers the following commands:\n - pip install . (which invokes bdist_wheel)\n - python setup.py install (which invokes install command)\n - python setup.py bdist_wheel (which invokes bdist_wheel command)\n \"\"\"\n\n def _should_generate_proto(self):\n \"\"\"Predicate method for running GenProto command or not.\"\"\"\n return True\n\n # Add \"gen_proto\" command as the first sub_command of \"build\". Each\n # sub_command of \"build\" (e.g. \"build_py\", \"build_ext\", etc.) is executed\n # sequentially when running a \"build\" command, if the second item in the tuple\n # (predicate method) is evaluated to true.\n sub_commands = [\n ('gen_proto', _should_generate_proto),\n ] + build.build.sub_commands\n\n\nclass _DevelopCommand(develop.develop):\n \"\"\"Developmental install.\n\n https://setuptools.readthedocs.io/en/latest/setuptools.html#development-mode\n Unlike normal package installation where distribution is copied to the\n site-packages folder, developmental install creates a symbolic link to the\n source code directory, so that your local code change is immediately visible\n in runtime without re-installation.\n\n This is a setuptools-only (i.e. not included in distutils) command that is\n also used in pip's editable install (pip install -e). Originally it only\n invokes build_py and install_lib command, but we override it to run gen_proto\n command in advance.\n\n This implementation covers the following commands:\n - pip install -e . (developmental install)\n - python setup.py develop (which is invoked from developmental install)\n \"\"\"\n\n def run(self):\n self.run_command('gen_proto')\n # Run super().initialize_options. 
Command is an old-style class (i.e.\n # doesn't inherit object) and super() fails in python 2.\n develop.develop.run(self)\n\n\nclass _GenProtoCommand(setuptools.Command):\n \"\"\"Generate proto stub files in python.\n\n Running this command will populate foo_pb2.py file next to your foo.proto\n file.\n \"\"\"\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n self._bazel_cmd = spawn.find_executable('bazel')\n if not self._bazel_cmd:\n raise RuntimeError(\n 'Could not find \"bazel\" binary. Please visit '\n 'https://docs.bazel.build/versions/master/install.html for '\n 'installation instruction.')\n\n def run(self):\n subprocess.check_call(\n [self._bazel_cmd, 'run', '//build:gen_proto'],\n # Bazel should be invoked in a directory containing bazel WORKSPACE\n # file, which is the root directory.\n cwd=os.path.dirname(os.path.realpath(__file__)),)\n\n\n# Get the long description from the README file.\nwith open('README.md') as fp:\n _LONG_DESCRIPTION = fp.read()\n\n\nsetup(\n name='tfx',\n version=version.__version__,\n author='Google LLC',\n author_email='[email protected]',\n license='Apache 2.0',\n classifiers=[\n 'Development Status :: 4 - Beta',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Education',\n 'Intended Audience :: Science/Research',\n 'License :: OSI Approved :: Apache Software License',\n 'Operating System :: OS Independent',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3 :: Only',\n 'Topic :: Scientific/Engineering',\n 'Topic :: Scientific/Engineering :: Artificial Intelligence',\n 'Topic :: Scientific/Engineering :: Mathematics',\n 'Topic :: Software Development',\n 'Topic :: Software Development :: Libraries',\n 'Topic :: Software Development :: Libraries :: Python Modules',\n ],\n namespace_packages=[],\n install_requires=dependencies.make_required_install_packages(),\n extras_require={\n # In order to use 'docker-image' or 'all', system libraries specified\n # under 'tfx/tools/docker/Dockerfile' are required\n 'docker-image': dependencies.make_extra_packages_docker_image(),\n 'tfjs': dependencies.make_extra_packages_tfjs(),\n 'all': dependencies.make_all_dependency_packages(),\n },\n # TODO(b/158761800): Move to [build-system] requires in pyproject.toml.\n setup_requires=[\n 'pytest-runner',\n 'poetry==1.0.9', # Required for ResolveDeps command.\n # Poetry API is not officially documented and subject\n # to change in the future. Thus fix the version.\n 'clikit>=0.4.3,<0.5', # Required for ResolveDeps command.\n ],\n cmdclass={\n 'build': _BuildCommand,\n 'develop': _DevelopCommand,\n 'gen_proto': _GenProtoCommand,\n 'resolve_deps': resolve_deps.ResolveDepsCommand,\n },\n python_requires='>=3.5,<4',\n packages=find_packages(),\n include_package_data=True,\n description='TensorFlow Extended (TFX) is a TensorFlow-based general-purpose machine learning platform implemented at Google',\n long_description=_LONG_DESCRIPTION,\n long_description_content_type='text/markdown',\n keywords='tensorflow tfx',\n url='https://www.tensorflow.org/tfx',\n download_url='https://github.com/tensorflow/tfx/tags',\n requires=[],\n # Below console_scripts, each line identifies one console script. 
The first\n # part before the equals sign (=) which is 'tfx', is the name of the script\n # that should be generated, the second part is the import path followed by a\n # colon (:) with the Click command group. After installation, the user can\n # invoke the CLI using \"tfx <command_group> <sub_command> <flags>\"\n entry_points=\"\"\"\n [console_scripts]\n tfx=tfx.tools.cli.cli_main:cli_group\n \"\"\")\n",
"path": "setup.py"
}
] | diff --git a/.dockerignore b/.dockerignore
index 27a1df850c..0a088a84c7 100644
--- a/.dockerignore
+++ b/.dockerignore
@@ -9,7 +9,7 @@ __pycache__/
# Distribution / packaging
.Python
-build/
+# build/ # build/ contains required files for building tfx package.
develop-eggs/
dist/
downloads/
diff --git a/.gitignore b/.gitignore
index 4d61c6ee1f..5a6afcc82f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -9,7 +9,7 @@ __pycache__/
# Distribution / packaging
.Python
-build/
+# build/ # build/ contains required files for building tfx packages.
develop-eggs/
dist/
downloads/
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index d42fb77bab..f784b7a4c1 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -86,7 +86,7 @@ further stub generation requires manual invocation of the following command.)
```shell
# In the tfx root directory
-bazel run //tfx/build:gen_proto
+bazel run //build:gen_proto
```
## Running Unit Tests
diff --git a/tfx/build/BUILD b/build/BUILD
similarity index 100%
rename from tfx/build/BUILD
rename to build/BUILD
diff --git a/tfx/build/gen_proto.sh b/build/gen_proto.sh
similarity index 100%
rename from tfx/build/gen_proto.sh
rename to build/gen_proto.sh
diff --git a/setup.py b/setup.py
index 874d73ecc3..612b6c1fad 100644
--- a/setup.py
+++ b/setup.py
@@ -108,7 +108,7 @@ def finalize_options(self):
def run(self):
subprocess.check_call(
- [self._bazel_cmd, 'run', '//tfx/build:gen_proto'],
+ [self._bazel_cmd, 'run', '//build:gen_proto'],
# Bazel should be invoked in a directory containing bazel WORKSPACE
# file, which is the root directory.
cwd=os.path.dirname(os.path.realpath(__file__)),)
| Project can't be cloned correctly on macOS due to case insensitivity
Under the `tfx` folder there's a folder called `build` and a bazel file called `BUILD`. Because the default macOS filesystem is case-insensitive, only the folder is checked out when `git clone` is run. This means that when trying to build locally, bazel won't be able to find the `BUILD` file required to compile the protobuf schemas, and will fail.
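A small, repo-agnostic sketch (not part of TFX) that lists tracked paths colliding on a case-insensitive filesystem, which is one way to spot clashes like `tfx/build` vs. `tfx/BUILD` before cloning on macOS:
```python
import subprocess
from collections import defaultdict


def case_collisions(repo_root='.'):
    """Group tracked paths (files and their parent directories) that differ
    only by case; any group with more than one spelling clashes on a
    case-insensitive filesystem."""
    files = subprocess.check_output(
        ['git', 'ls-files'], cwd=repo_root, text=True).splitlines()
    entries = set()
    for path in files:
        parts = path.split('/')
        for i in range(1, len(parts) + 1):
            entries.add('/'.join(parts[:i]))
    groups = defaultdict(list)
    for entry in entries:
        groups[entry.lower()].append(entry)
    return {key: sorted(paths) for key, paths in groups.items() if len(paths) > 1}


# On the affected revision this should report something like
# {'tfx/build': ['tfx/BUILD', 'tfx/build']}.
print(case_collisions())
```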
|
akvo__akvo-rsr-3604 | [
{
"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.db.models import Q\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom akvo.rsr.models import Report, ReportFormat, Project\nfrom ..serializers import ReportSerializer, ReportFormatSerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass ReportViewSet(BaseRSRViewSet):\n \"\"\"Viewset providing Result data.\"\"\"\n\n queryset = Report.objects.prefetch_related(\n 'organisations',\n 'formats',\n )\n serializer_class = ReportSerializer\n\n def get_queryset(self):\n \"\"\"\n Allow custom filter for sync_owner, since this field has been replaced by the\n reporting org partnership.\n \"\"\"\n reports = super(ReportViewSet, self).get_queryset()\n user = self.request.user\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n if not is_admin:\n # Show only those reports that the user is allowed to see\n approved_orgs = user.approved_organisations() if not user.is_anonymous() else []\n reports = reports.filter(\n Q(organisations=None) | Q(organisations__in=approved_orgs)\n ).distinct()\n return reports\n\n\n@api_view(['GET'])\ndef report_formats(request):\n \"\"\"\n A view for displaying all report format information.\n \"\"\"\n return Response({\n 'count': ReportFormat.objects.all().count(),\n 'results': [ReportFormatSerializer(f).data for f in ReportFormat.objects.all()],\n })\n\n\n@api_view(['GET'])\ndef project_reports(request, project_pk):\n \"\"\"A view for displaying project specific reports.\"\"\"\n\n project = get_object_or_404(Project, pk=project_pk)\n reports = Report.objects.prefetch_related('formats', 'organisations')\\\n .filter(url__icontains='project')\n\n user = request.user\n if not user.has_perm('rsr.view_project', project):\n return Response('Request not allowed', status=status.HTTP_403_FORBIDDEN)\n\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n\n if not is_admin:\n partners_org = project.partner_organisation_pks()\n reports = reports.filter(\n Q(organisations=None) | Q(organisations__in=partners_org)\n )\n\n serializer = ReportSerializer(reports.distinct(), many=True)\n return Response(serializer.data)\n",
"path": "akvo/rest/views/report.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n\n# Akvo RSR is covered by the GNU Affero General Public License.\n# See more details in the license.txt file located at the root folder of the Akvo RSR module.\n# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.\n\nfrom django.db.models import Q\nfrom django.shortcuts import get_object_or_404\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom akvo.rsr.models import Report, ReportFormat, Project\nfrom ..serializers import ReportSerializer, ReportFormatSerializer\nfrom ..viewsets import BaseRSRViewSet\n\n\nclass ReportViewSet(BaseRSRViewSet):\n \"\"\"Viewset providing Result data.\"\"\"\n\n queryset = Report.objects.prefetch_related(\n 'organisations',\n 'formats',\n )\n serializer_class = ReportSerializer\n\n def get_queryset(self):\n \"\"\"\n Allow custom filter for sync_owner, since this field has been replaced by the\n reporting org partnership.\n \"\"\"\n reports = super(ReportViewSet, self).get_queryset()\n user = self.request.user\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n if not is_admin:\n # Show only those reports that the user is allowed to see\n approved_orgs = user.approved_organisations() if not user.is_anonymous() else []\n reports = reports.filter(\n Q(organisations=None) | Q(organisations__in=approved_orgs)\n ).distinct()\n return reports\n\n\n@api_view(['GET'])\ndef report_formats(request):\n \"\"\"\n A view for displaying all report format information.\n \"\"\"\n return Response({\n 'count': ReportFormat.objects.all().count(),\n 'results': [ReportFormatSerializer(f).data for f in ReportFormat.objects.all()],\n })\n\n\n@api_view(['GET'])\ndef project_reports(request, project_pk):\n \"\"\"A view for displaying project specific reports.\"\"\"\n\n project = get_object_or_404(Project, pk=project_pk)\n reports = Report.objects.prefetch_related('formats', 'organisations')\\\n .filter(url__icontains='{project}')\n\n user = request.user\n if not user.has_perm('rsr.view_project', project):\n return Response('Request not allowed', status=status.HTTP_403_FORBIDDEN)\n\n is_admin = user.is_active and (user.is_superuser or user.is_admin)\n\n if not is_admin:\n partners_org = project.partner_organisation_pks()\n reports = reports.filter(\n Q(organisations=None) | Q(organisations__in=partners_org)\n )\n\n serializer = ReportSerializer(reports.distinct(), many=True)\n return Response(serializer.data)\n",
"path": "akvo/rest/views/report.py"
}
] | diff --git a/akvo/rest/views/report.py b/akvo/rest/views/report.py
index 005b829a79..9060551a8d 100644
--- a/akvo/rest/views/report.py
+++ b/akvo/rest/views/report.py
@@ -58,7 +58,7 @@ def project_reports(request, project_pk):
project = get_object_or_404(Project, pk=project_pk)
reports = Report.objects.prefetch_related('formats', 'organisations')\
- .filter(url__icontains='project')
+ .filter(url__icontains='{project}')
user = request.user
if not user.has_perm('rsr.view_project', project):
diff --git a/akvo/rsr/tests/base.py b/akvo/rsr/tests/base.py
index 975a18a596..25766c201f 100644
--- a/akvo/rsr/tests/base.py
+++ b/akvo/rsr/tests/base.py
@@ -60,8 +60,9 @@ def create_project(title, published=True, public=True):
return project
@staticmethod
- def create_report(report_name, organisation=None, is_org_report=False):
- url = '/{organisation}/?format={format}' if is_org_report else '/{project}/?format={format}'
+ def create_report(report_name, organisation=None, is_org_report=False, url=None):
+ if url is None:
+ url = '/{organisation}/?format={format}' if is_org_report else '/{project}/?format={format}'
report = Report.objects.create(
name=report_name, title=report_name, url=url)
if organisation is not None:
diff --git a/akvo/rsr/tests/rest/test_project_reports.py b/akvo/rsr/tests/rest/test_project_reports.py
index 6a1d3eb075..82a03079e6 100644
--- a/akvo/rsr/tests/rest/test_project_reports.py
+++ b/akvo/rsr/tests/rest/test_project_reports.py
@@ -114,6 +114,10 @@ def test_only_reports_with_project_parameter_are_shown(self):
org1 = self.create_organisation('org-1')
self.create_report('report-1')
self.create_report('report-2', is_org_report=True)
+ self.create_report(
+ 'projects-overview',
+ url='/en/reports/project_overview/{organisation}?format={format}&download=true'
+ )
self.make_partner(proj1, org1)
user = self.create_user('[email protected]', 'secret')
| Organisation report shown in project reports page
The "Project overview" report is an organisation report, yet it is displayed on the project reports page, where it should not appear.
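The diff above fixes this by filtering on the literal `{project}` placeholder instead of the bare substring `'project'`; a tiny illustration of why that matters, using URL templates like the ones in the tests:
```python
urls = [
    '/{project}/?format={format}',                                           # project report
    '/{organisation}/?format={format}',                                      # organisation report
    '/en/reports/project_overview/{organisation}?format={format}&download=true',  # organisation report
]

# The old filter matches any URL containing the word 'project', so the
# organisation-level overview report leaks onto the project reports page.
print([u for u in urls if 'project' in u])
# The fixed filter only matches genuine project report URL templates.
print([u for u in urls if '{project}' in u])
```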
|
numba__numba-3241 | [
{
"content": "from __future__ import print_function, division, absolute_import\n\nimport numpy as np\n\nfrom collections import namedtuple\n\nfrom numba import types, utils\nfrom numba.typing.templates import (AttributeTemplate, AbstractTemplate,\n infer, infer_getattr, signature,\n bound_function)\n# import time side effect: array operations requires typing support of sequence\n# defined in collections: e.g. array.shape[i]\nfrom numba.typing import collections\nfrom numba.errors import TypingError\n\nIndexing = namedtuple(\"Indexing\", (\"index\", \"result\", \"advanced\"))\n\n\ndef get_array_index_type(ary, idx):\n \"\"\"\n Returns None or a tuple-3 for the types of the input array, index, and\n resulting type of ``array[index]``.\n\n Note: This is shared logic for ndarray getitem and setitem.\n \"\"\"\n if not isinstance(ary, types.Buffer):\n return\n\n ndim = ary.ndim\n\n left_indices = []\n right_indices = []\n ellipsis_met = False\n advanced = False\n has_integer = False\n\n if not isinstance(idx, types.BaseTuple):\n idx = [idx]\n\n # Walk indices\n for ty in idx:\n if ty is types.ellipsis:\n if ellipsis_met:\n raise TypeError(\"only one ellipsis allowed in array index \"\n \"(got %s)\" % (idx,))\n ellipsis_met = True\n elif isinstance(ty, types.SliceType):\n pass\n elif isinstance(ty, types.Integer):\n # Normalize integer index\n ty = types.intp if ty.signed else types.uintp\n # Integer indexing removes the given dimension\n ndim -= 1\n has_integer = True\n elif (isinstance(ty, types.Array) and ty.ndim == 0\n and isinstance(ty.dtype, types.Integer)):\n # 0-d array used as integer index\n ndim -= 1\n has_integer = True\n elif (isinstance(ty, types.Array)\n and ty.ndim == 1\n and isinstance(ty.dtype, (types.Integer, types.Boolean))):\n if advanced or has_integer:\n # We don't support the complicated combination of\n # advanced indices (and integers are considered part\n # of them by Numpy).\n raise NotImplementedError(\"only one advanced index supported\")\n advanced = True\n else:\n raise TypeError(\"unsupported array index type %s in %s\"\n % (ty, idx))\n (right_indices if ellipsis_met else left_indices).append(ty)\n\n # Only Numpy arrays support advanced indexing\n if advanced and not isinstance(ary, types.Array):\n return\n\n # Check indices and result dimensionality\n all_indices = left_indices + right_indices\n if ellipsis_met:\n assert right_indices[0] is types.ellipsis\n del right_indices[0]\n\n n_indices = len(all_indices) - ellipsis_met\n if n_indices > ary.ndim:\n raise TypeError(\"cannot index %s with %d indices: %s\"\n % (ary, n_indices, idx))\n if n_indices == ary.ndim and ndim == 0 and not ellipsis_met:\n # Full integer indexing => scalar result\n # (note if ellipsis is present, a 0-d view is returned instead)\n res = ary.dtype\n\n elif advanced:\n # Result is a copy\n res = ary.copy(ndim=ndim, layout='C', readonly=False)\n\n else:\n # Result is a view\n if ary.slice_is_copy:\n # Avoid view semantics when the original type creates a copy\n # when slicing.\n return\n\n # Infer layout\n layout = ary.layout\n\n def keeps_contiguity(ty, is_innermost):\n # A slice can only keep an array contiguous if it is the\n # innermost index and it is not strided\n return (ty is types.ellipsis or isinstance(ty, types.Integer)\n or (is_innermost and isinstance(ty, types.SliceType)\n and not ty.has_step))\n\n def check_contiguity(outer_indices):\n \"\"\"\n Whether indexing with the given indices (from outer to inner in\n physical layout order) can keep an array contiguous.\n \"\"\"\n for ty 
in outer_indices[:-1]:\n if not keeps_contiguity(ty, False):\n return False\n if outer_indices and not keeps_contiguity(outer_indices[-1], True):\n return False\n return True\n\n if layout == 'C':\n # Integer indexing on the left keeps the array C-contiguous\n if n_indices == ary.ndim:\n # If all indices are there, ellipsis's place is indifferent\n left_indices = left_indices + right_indices\n right_indices = []\n if right_indices:\n layout = 'A'\n elif not check_contiguity(left_indices):\n layout = 'A'\n elif layout == 'F':\n # Integer indexing on the right keeps the array F-contiguous\n if n_indices == ary.ndim:\n # If all indices are there, ellipsis's place is indifferent\n right_indices = left_indices + right_indices\n left_indices = []\n if left_indices:\n layout = 'A'\n elif not check_contiguity(right_indices[::-1]):\n layout = 'A'\n\n res = ary.copy(ndim=ndim, layout=layout)\n\n # Re-wrap indices\n if isinstance(idx, types.BaseTuple):\n idx = types.BaseTuple.from_types(all_indices)\n else:\n idx, = all_indices\n\n return Indexing(idx, res, advanced)\n\n\n@infer\nclass GetItemBuffer(AbstractTemplate):\n key = \"getitem\"\n\n def generic(self, args, kws):\n assert not kws\n [ary, idx] = args\n out = get_array_index_type(ary, idx)\n if out is not None:\n return signature(out.result, ary, out.index)\n\n@infer\nclass SetItemBuffer(AbstractTemplate):\n key = \"setitem\"\n\n def generic(self, args, kws):\n assert not kws\n ary, idx, val = args\n if not isinstance(ary, types.Buffer):\n return\n if not ary.mutable:\n raise TypeError(\"Cannot modify value of type %s\" %(ary,))\n out = get_array_index_type(ary, idx)\n if out is None:\n return\n\n idx = out.index\n res = out.result\n if isinstance(res, types.Array):\n # Indexing produces an array\n if isinstance(val, types.Array):\n if not self.context.can_convert(val.dtype, res.dtype):\n # DType conversion not possible\n return\n else:\n res = val\n elif isinstance(val, types.Sequence):\n if (res.ndim == 1 and\n self.context.can_convert(val.dtype, res.dtype)):\n # Allow assignement of sequence to 1d array\n res = val\n else:\n # NOTE: sequence-to-array broadcasting is unsupported\n return\n else:\n # Allow scalar broadcasting\n if self.context.can_convert(val, res.dtype):\n res = res.dtype\n else:\n # Incompatible scalar type\n return\n elif not isinstance(val, types.Array):\n # Single item assignment\n if not self.context.can_convert(val, res):\n # if the array dtype is not yet defined\n if not res.is_precise():\n # set the array type to use the dtype of value (RHS)\n newary = ary.copy(dtype=val)\n return signature(types.none, newary, idx, res)\n else:\n return\n res = val\n else:\n return\n return signature(types.none, ary, idx, res)\n\n\ndef normalize_shape(shape):\n if isinstance(shape, types.UniTuple):\n if isinstance(shape.dtype, types.Integer):\n dimtype = types.intp if shape.dtype.signed else types.uintp\n return types.UniTuple(dimtype, len(shape))\n\n elif isinstance(shape, types.Tuple) and shape.count == 0:\n # Force (0 x intp) for consistency with other shapes\n return types.UniTuple(types.intp, 0)\n\n\n@infer_getattr\nclass ArrayAttribute(AttributeTemplate):\n key = types.Array\n\n def resolve_dtype(self, ary):\n return types.DType(ary.dtype)\n\n def resolve_itemsize(self, ary):\n return types.intp\n\n def resolve_shape(self, ary):\n return types.UniTuple(types.intp, ary.ndim)\n\n def resolve_strides(self, ary):\n return types.UniTuple(types.intp, ary.ndim)\n\n def resolve_ndim(self, ary):\n return types.intp\n\n def 
resolve_size(self, ary):\n return types.intp\n\n def resolve_flat(self, ary):\n return types.NumpyFlatType(ary)\n\n def resolve_ctypes(self, ary):\n return types.ArrayCTypes(ary)\n\n def resolve_flags(self, ary):\n return types.ArrayFlags(ary)\n\n def resolve_T(self, ary):\n if ary.ndim <= 1:\n retty = ary\n else:\n layout = {\"C\": \"F\", \"F\": \"C\"}.get(ary.layout, \"A\")\n retty = ary.copy(layout=layout)\n return retty\n\n def resolve_real(self, ary):\n return self._resolve_real_imag(ary, attr='real')\n\n def resolve_imag(self, ary):\n return self._resolve_real_imag(ary, attr='imag')\n\n def _resolve_real_imag(self, ary, attr):\n if ary.dtype in types.complex_domain:\n return ary.copy(dtype=ary.dtype.underlying_float, layout='A')\n elif ary.dtype in types.number_domain:\n res = ary.copy(dtype=ary.dtype)\n if attr == 'imag':\n res = res.copy(readonly=True)\n return res\n else:\n msg = \"cannot access .{} of array of {}\"\n raise TypingError(msg.format(attr, ary.dtype))\n\n @bound_function(\"array.transpose\")\n def resolve_transpose(self, ary, args, kws):\n def sentry_shape_scalar(ty):\n if ty in types.number_domain:\n # Guard against non integer type\n if not isinstance(ty, types.Integer):\n raise TypeError(\"transpose() arg cannot be {0}\".format(ty))\n return True\n else:\n return False\n\n assert not kws\n if len(args) == 0:\n return signature(self.resolve_T(ary))\n\n if len(args) == 1:\n shape, = args\n\n if sentry_shape_scalar(shape):\n assert ary.ndim == 1\n return signature(ary, *args)\n\n shape = normalize_shape(shape)\n if shape is None:\n return\n\n assert ary.ndim == shape.count\n return signature(self.resolve_T(ary), shape)\n\n else:\n if any(not sentry_shape_scalar(a) for a in args):\n raise TypeError(\"transpose({0}) is not supported\".format(\n ', '.join(args)))\n assert ary.ndim == len(args)\n return signature(self.resolve_T(ary), *args)\n\n @bound_function(\"array.copy\")\n def resolve_copy(self, ary, args, kws):\n assert not args\n assert not kws\n retty = ary.copy(layout=\"C\", readonly=False)\n return signature(retty)\n\n @bound_function(\"array.item\")\n def resolve_item(self, ary, args, kws):\n assert not kws\n # We don't support explicit arguments as that's exactly equivalent\n # to regular indexing. The no-argument form is interesting to\n # allow some degree of genericity when writing functions.\n if not args:\n return signature(ary.dtype)\n\n @bound_function(\"array.itemset\")\n def resolve_itemset(self, ary, args, kws):\n assert not kws\n # We don't support explicit arguments as that's exactly equivalent\n # to regular indexing. 
The no-argument form is interesting to\n # allow some degree of genericity when writing functions.\n if len(args) == 1:\n return signature(types.none, ary.dtype)\n\n @bound_function(\"array.nonzero\")\n def resolve_nonzero(self, ary, args, kws):\n assert not args\n assert not kws\n # 0-dim arrays return one result array\n ndim = max(ary.ndim, 1)\n retty = types.UniTuple(types.Array(types.intp, 1, 'C'), ndim)\n return signature(retty)\n\n @bound_function(\"array.reshape\")\n def resolve_reshape(self, ary, args, kws):\n def sentry_shape_scalar(ty):\n if ty in types.number_domain:\n # Guard against non integer type\n if not isinstance(ty, types.Integer):\n raise TypeError(\"reshape() arg cannot be {0}\".format(ty))\n return True\n else:\n return False\n\n assert not kws\n if ary.layout not in 'CF':\n # only work for contiguous array\n raise TypeError(\"reshape() supports contiguous array only\")\n\n if len(args) == 1:\n # single arg\n shape, = args\n\n if sentry_shape_scalar(shape):\n ndim = 1\n else:\n shape = normalize_shape(shape)\n if shape is None:\n return\n ndim = shape.count\n retty = ary.copy(ndim=ndim)\n return signature(retty, shape)\n\n elif len(args) == 0:\n # no arg\n raise TypeError(\"reshape() take at least one arg\")\n\n else:\n # vararg case\n if any(not sentry_shape_scalar(a) for a in args):\n raise TypeError(\"reshape({0}) is not supported\".format(\n ', '.join(args)))\n\n retty = ary.copy(ndim=len(args))\n return signature(retty, *args)\n\n @bound_function(\"array.sort\")\n def resolve_sort(self, ary, args, kws):\n assert not args\n assert not kws\n if ary.ndim == 1:\n return signature(types.none)\n\n @bound_function(\"array.argsort\")\n def resolve_argsort(self, ary, args, kws):\n assert not args\n kwargs = dict(kws)\n kind = kwargs.pop('kind', types.Const('quicksort'))\n if kwargs:\n msg = \"Unsupported keywords: {!r}\"\n raise TypingError(msg.format([k for k in kwargs.keys()]))\n if ary.ndim == 1:\n def argsort_stub(kind='quicksort'):\n pass\n pysig = utils.pysignature(argsort_stub)\n sig = signature(types.Array(types.intp, 1, 'C'), kind).replace(pysig=pysig)\n return sig\n\n @bound_function(\"array.view\")\n def resolve_view(self, ary, args, kws):\n from .npydecl import _parse_dtype\n assert not kws\n dtype, = args\n dtype = _parse_dtype(dtype)\n if dtype is None:\n return\n retty = ary.copy(dtype=dtype)\n return signature(retty, *args)\n\n @bound_function(\"array.astype\")\n def resolve_astype(self, ary, args, kws):\n from .npydecl import _parse_dtype\n assert not kws\n dtype, = args\n dtype = _parse_dtype(dtype)\n if dtype is None:\n return\n if not self.context.can_convert(ary.dtype, dtype):\n raise TypeError(\"astype(%s) not supported on %s: \"\n \"cannot convert from %s to %s\"\n % (dtype, ary, ary.dtype, dtype))\n layout = ary.layout if ary.layout in 'CF' else 'C'\n retty = ary.copy(dtype=dtype, layout=layout)\n return signature(retty, *args)\n\n @bound_function(\"array.ravel\")\n def resolve_ravel(self, ary, args, kws):\n # Only support no argument version (default order='C')\n assert not kws\n assert not args\n return signature(ary.copy(ndim=1, layout='C'))\n\n @bound_function(\"array.flatten\")\n def resolve_flatten(self, ary, args, kws):\n # Only support no argument version (default order='C')\n assert not kws\n assert not args\n return signature(ary.copy(ndim=1, layout='C'))\n\n @bound_function(\"array.take\")\n def resolve_take(self, ary, args, kws):\n assert not kws\n argty, = args\n if isinstance(argty, types.Integer):\n sig = signature(ary.dtype, 
*args)\n elif isinstance(argty, types.Array):\n sig = signature(argty.copy(layout='C', dtype=ary.dtype), *args)\n elif isinstance(argty, types.List): # 1d lists only\n sig = signature(types.Array(ary.dtype, 1, 'C'), *args)\n elif isinstance(argty, types.BaseTuple):\n sig = signature(types.Array(ary.dtype, np.ndim(argty), 'C'), *args)\n else:\n raise TypeError(\"take(%s) not supported for %s\" % argty)\n return sig\n\n def generic_resolve(self, ary, attr):\n # Resolution of other attributes, for record arrays\n if isinstance(ary.dtype, types.Record):\n if attr in ary.dtype.fields:\n return ary.copy(dtype=ary.dtype.typeof(attr), layout='A')\n\n\n@infer_getattr\nclass DTypeAttr(AttributeTemplate):\n key = types.DType\n\n def resolve_type(self, ary):\n # Wrap the numeric type in NumberClass\n return types.NumberClass(ary.dtype)\n\n def resolve_kind(self, ary):\n if isinstance(ary.key, types.scalars.Float):\n val = 'f'\n elif isinstance(ary.key, types.scalars.Integer):\n val = 'i'\n else:\n return None # other types not supported yet\n return types.Const(val)\n\n@infer\nclass StaticGetItemArray(AbstractTemplate):\n key = \"static_getitem\"\n\n def generic(self, args, kws):\n # Resolution of members for record and structured arrays\n ary, idx = args\n if (isinstance(ary, types.Array) and isinstance(idx, str) and\n isinstance(ary.dtype, types.Record)):\n if idx in ary.dtype.fields:\n return ary.copy(dtype=ary.dtype.typeof(idx), layout='A')\n\n\n@infer_getattr\nclass RecordAttribute(AttributeTemplate):\n key = types.Record\n\n def generic_resolve(self, record, attr):\n ret = record.typeof(attr)\n assert ret\n return ret\n\n@infer\nclass StaticGetItemRecord(AbstractTemplate):\n key = \"static_getitem\"\n\n def generic(self, args, kws):\n # Resolution of members for records\n record, idx = args\n if isinstance(record, types.Record) and isinstance(idx, str):\n ret = record.typeof(idx)\n assert ret\n return ret\n\n@infer\nclass StaticSetItemRecord(AbstractTemplate):\n key = \"static_setitem\"\n\n def generic(self, args, kws):\n # Resolution of members for record and structured arrays\n record, idx, value = args\n if isinstance(record, types.Record) and isinstance(idx, str):\n expectedty = record.typeof(idx)\n if self.context.can_convert(value, expectedty) is not None:\n return signature(types.void, record, types.Const(idx), value)\n\n\n@infer_getattr\nclass ArrayCTypesAttribute(AttributeTemplate):\n key = types.ArrayCTypes\n\n def resolve_data(self, ctinfo):\n return types.uintp\n\n\n@infer_getattr\nclass ArrayFlagsAttribute(AttributeTemplate):\n key = types.ArrayFlags\n\n def resolve_contiguous(self, ctflags):\n return types.boolean\n\n def resolve_c_contiguous(self, ctflags):\n return types.boolean\n\n def resolve_f_contiguous(self, ctflags):\n return types.boolean\n\n\n@infer_getattr\nclass NestedArrayAttribute(ArrayAttribute):\n key = types.NestedArray\n\n\ndef _expand_integer(ty):\n \"\"\"\n If *ty* is an integer, expand it to a machine int (like Numpy).\n \"\"\"\n if isinstance(ty, types.Integer):\n if ty.signed:\n return max(types.intp, ty)\n else:\n return max(types.uintp, ty)\n elif isinstance(ty, types.Boolean):\n return types.intp\n else:\n return ty\n\ndef generic_homog(self, args, kws):\n assert not args\n assert not kws\n return signature(self.this.dtype, recvr=self.this)\n\ndef generic_expand(self, args, kws):\n assert not args\n assert not kws\n return signature(_expand_integer(self.this.dtype), recvr=self.this)\n\ndef sum_expand(self, args, kws):\n \"\"\"\n sum can be called with or 
without an axis parameter.\n \"\"\"\n pysig = None\n if kws:\n def sum_stub(axis):\n pass\n pysig = utils.pysignature(sum_stub)\n # rewrite args\n args = list(args) + [kws['axis']]\n kws = None\n args_len = len(args)\n assert args_len <= 1\n if args_len == 0:\n # No axis parameter so the return type of the summation is a scalar\n # of the type of the array.\n out = signature(_expand_integer(self.this.dtype), *args,\n recvr=self.this)\n else:\n # There is an axis paramter so the return type of this summation is\n # an array of dimension one less than the input array.\n return_type = types.Array(dtype=_expand_integer(self.this.dtype),\n ndim=self.this.ndim-1, layout='C')\n out = signature(return_type, *args, recvr=self.this)\n return out.replace(pysig=pysig)\n\ndef generic_expand_cumulative(self, args, kws):\n assert not args\n assert not kws\n assert isinstance(self.this, types.Array)\n return_type = types.Array(dtype=_expand_integer(self.this.dtype),\n ndim=1, layout='C')\n return signature(return_type, recvr=self.this)\n\ndef generic_hetero_real(self, args, kws):\n assert not args\n assert not kws\n if isinstance(self.this.dtype, (types.Integer, types.Boolean)):\n return signature(types.float64, recvr=self.this)\n return signature(self.this.dtype, recvr=self.this)\n\ndef generic_hetero_always_real(self, args, kws):\n assert not args\n assert not kws\n if isinstance(self.this.dtype, (types.Integer, types.Boolean)):\n return signature(types.float64, recvr=self.this)\n if isinstance(self.this.dtype, types.Complex):\n return signature(self.this.dtype.underlying_float, recvr=self.this)\n return signature(self.this.dtype, recvr=self.this)\n\ndef generic_index(self, args, kws):\n assert not args\n assert not kws\n return signature(types.intp, recvr=self.this)\n\ndef install_array_method(name, generic, support_literals=False):\n my_attr = {\"key\": \"array.\" + name, \"generic\": generic}\n temp_class = type(\"Array_\" + name, (AbstractTemplate,), my_attr)\n if support_literals:\n temp_class.support_literals = support_literals\n def array_attribute_attachment(self, ary):\n return types.BoundFunction(temp_class, ary)\n\n setattr(ArrayAttribute, \"resolve_\" + name, array_attribute_attachment)\n\n# Functions that return the same type as the array\nfor fname in [\"min\", \"max\"]:\n install_array_method(fname, generic_homog)\n\n# Functions that return a machine-width type, to avoid overflows\ninstall_array_method(\"prod\", generic_expand)\ninstall_array_method(\"sum\", sum_expand, support_literals=True)\n\n# Functions that return a machine-width type, to avoid overflows\nfor fname in [\"cumsum\", \"cumprod\"]:\n install_array_method(fname, generic_expand_cumulative)\n\n# Functions that require integer arrays get promoted to float64 return\nfor fName in [\"mean\"]:\n install_array_method(fName, generic_hetero_real)\n\n# var and std by definition return in real space and int arrays\n# get promoted to float64 return\nfor fName in [\"var\", \"std\"]:\n install_array_method(fName, generic_hetero_always_real)\n\n\n# Functions that return an index (intp)\ninstall_array_method(\"argmin\", generic_index)\ninstall_array_method(\"argmax\", generic_index)\n",
"path": "numba/typing/arraydecl.py"
}
] | [
{
"content": "from __future__ import print_function, division, absolute_import\n\nimport numpy as np\n\nfrom collections import namedtuple\n\nfrom numba import types, utils\nfrom numba.typing.templates import (AttributeTemplate, AbstractTemplate,\n infer, infer_getattr, signature,\n bound_function)\n# import time side effect: array operations requires typing support of sequence\n# defined in collections: e.g. array.shape[i]\nfrom numba.typing import collections\nfrom numba.errors import TypingError\n\nIndexing = namedtuple(\"Indexing\", (\"index\", \"result\", \"advanced\"))\n\n\ndef get_array_index_type(ary, idx):\n \"\"\"\n Returns None or a tuple-3 for the types of the input array, index, and\n resulting type of ``array[index]``.\n\n Note: This is shared logic for ndarray getitem and setitem.\n \"\"\"\n if not isinstance(ary, types.Buffer):\n return\n\n ndim = ary.ndim\n\n left_indices = []\n right_indices = []\n ellipsis_met = False\n advanced = False\n has_integer = False\n\n if not isinstance(idx, types.BaseTuple):\n idx = [idx]\n\n # Walk indices\n for ty in idx:\n if ty is types.ellipsis:\n if ellipsis_met:\n raise TypeError(\"only one ellipsis allowed in array index \"\n \"(got %s)\" % (idx,))\n ellipsis_met = True\n elif isinstance(ty, types.SliceType):\n pass\n elif isinstance(ty, types.Integer):\n # Normalize integer index\n ty = types.intp if ty.signed else types.uintp\n # Integer indexing removes the given dimension\n ndim -= 1\n has_integer = True\n elif (isinstance(ty, types.Array) and ty.ndim == 0\n and isinstance(ty.dtype, types.Integer)):\n # 0-d array used as integer index\n ndim -= 1\n has_integer = True\n elif (isinstance(ty, types.Array)\n and ty.ndim == 1\n and isinstance(ty.dtype, (types.Integer, types.Boolean))):\n if advanced or has_integer:\n # We don't support the complicated combination of\n # advanced indices (and integers are considered part\n # of them by Numpy).\n raise NotImplementedError(\"only one advanced index supported\")\n advanced = True\n else:\n raise TypeError(\"unsupported array index type %s in %s\"\n % (ty, idx))\n (right_indices if ellipsis_met else left_indices).append(ty)\n\n # Only Numpy arrays support advanced indexing\n if advanced and not isinstance(ary, types.Array):\n return\n\n # Check indices and result dimensionality\n all_indices = left_indices + right_indices\n if ellipsis_met:\n assert right_indices[0] is types.ellipsis\n del right_indices[0]\n\n n_indices = len(all_indices) - ellipsis_met\n if n_indices > ary.ndim:\n raise TypeError(\"cannot index %s with %d indices: %s\"\n % (ary, n_indices, idx))\n if n_indices == ary.ndim and ndim == 0 and not ellipsis_met:\n # Full integer indexing => scalar result\n # (note if ellipsis is present, a 0-d view is returned instead)\n res = ary.dtype\n\n elif advanced:\n # Result is a copy\n res = ary.copy(ndim=ndim, layout='C', readonly=False)\n\n else:\n # Result is a view\n if ary.slice_is_copy:\n # Avoid view semantics when the original type creates a copy\n # when slicing.\n return\n\n # Infer layout\n layout = ary.layout\n\n def keeps_contiguity(ty, is_innermost):\n # A slice can only keep an array contiguous if it is the\n # innermost index and it is not strided\n return (ty is types.ellipsis or isinstance(ty, types.Integer)\n or (is_innermost and isinstance(ty, types.SliceType)\n and not ty.has_step))\n\n def check_contiguity(outer_indices):\n \"\"\"\n Whether indexing with the given indices (from outer to inner in\n physical layout order) can keep an array contiguous.\n \"\"\"\n for ty 
in outer_indices[:-1]:\n if not keeps_contiguity(ty, False):\n return False\n if outer_indices and not keeps_contiguity(outer_indices[-1], True):\n return False\n return True\n\n if layout == 'C':\n # Integer indexing on the left keeps the array C-contiguous\n if n_indices == ary.ndim:\n # If all indices are there, ellipsis's place is indifferent\n left_indices = left_indices + right_indices\n right_indices = []\n if right_indices:\n layout = 'A'\n elif not check_contiguity(left_indices):\n layout = 'A'\n elif layout == 'F':\n # Integer indexing on the right keeps the array F-contiguous\n if n_indices == ary.ndim:\n # If all indices are there, ellipsis's place is indifferent\n right_indices = left_indices + right_indices\n left_indices = []\n if left_indices:\n layout = 'A'\n elif not check_contiguity(right_indices[::-1]):\n layout = 'A'\n\n if ndim == 0:\n # Implicitly convert to a scalar if the output ndim==0\n res = ary.dtype\n else:\n res = ary.copy(ndim=ndim, layout=layout)\n\n # Re-wrap indices\n if isinstance(idx, types.BaseTuple):\n idx = types.BaseTuple.from_types(all_indices)\n else:\n idx, = all_indices\n\n return Indexing(idx, res, advanced)\n\n\n@infer\nclass GetItemBuffer(AbstractTemplate):\n key = \"getitem\"\n\n def generic(self, args, kws):\n assert not kws\n [ary, idx] = args\n out = get_array_index_type(ary, idx)\n if out is not None:\n return signature(out.result, ary, out.index)\n\n@infer\nclass SetItemBuffer(AbstractTemplate):\n key = \"setitem\"\n\n def generic(self, args, kws):\n assert not kws\n ary, idx, val = args\n if not isinstance(ary, types.Buffer):\n return\n if not ary.mutable:\n raise TypeError(\"Cannot modify value of type %s\" %(ary,))\n out = get_array_index_type(ary, idx)\n if out is None:\n return\n\n idx = out.index\n res = out.result\n if isinstance(res, types.Array):\n # Indexing produces an array\n if isinstance(val, types.Array):\n if not self.context.can_convert(val.dtype, res.dtype):\n # DType conversion not possible\n return\n else:\n res = val\n elif isinstance(val, types.Sequence):\n if (res.ndim == 1 and\n self.context.can_convert(val.dtype, res.dtype)):\n # Allow assignement of sequence to 1d array\n res = val\n else:\n # NOTE: sequence-to-array broadcasting is unsupported\n return\n else:\n # Allow scalar broadcasting\n if self.context.can_convert(val, res.dtype):\n res = res.dtype\n else:\n # Incompatible scalar type\n return\n elif not isinstance(val, types.Array):\n # Single item assignment\n if not self.context.can_convert(val, res):\n # if the array dtype is not yet defined\n if not res.is_precise():\n # set the array type to use the dtype of value (RHS)\n newary = ary.copy(dtype=val)\n return signature(types.none, newary, idx, res)\n else:\n return\n res = val\n else:\n return\n return signature(types.none, ary, idx, res)\n\n\ndef normalize_shape(shape):\n if isinstance(shape, types.UniTuple):\n if isinstance(shape.dtype, types.Integer):\n dimtype = types.intp if shape.dtype.signed else types.uintp\n return types.UniTuple(dimtype, len(shape))\n\n elif isinstance(shape, types.Tuple) and shape.count == 0:\n # Force (0 x intp) for consistency with other shapes\n return types.UniTuple(types.intp, 0)\n\n\n@infer_getattr\nclass ArrayAttribute(AttributeTemplate):\n key = types.Array\n\n def resolve_dtype(self, ary):\n return types.DType(ary.dtype)\n\n def resolve_itemsize(self, ary):\n return types.intp\n\n def resolve_shape(self, ary):\n return types.UniTuple(types.intp, ary.ndim)\n\n def resolve_strides(self, ary):\n return 
types.UniTuple(types.intp, ary.ndim)\n\n def resolve_ndim(self, ary):\n return types.intp\n\n def resolve_size(self, ary):\n return types.intp\n\n def resolve_flat(self, ary):\n return types.NumpyFlatType(ary)\n\n def resolve_ctypes(self, ary):\n return types.ArrayCTypes(ary)\n\n def resolve_flags(self, ary):\n return types.ArrayFlags(ary)\n\n def resolve_T(self, ary):\n if ary.ndim <= 1:\n retty = ary\n else:\n layout = {\"C\": \"F\", \"F\": \"C\"}.get(ary.layout, \"A\")\n retty = ary.copy(layout=layout)\n return retty\n\n def resolve_real(self, ary):\n return self._resolve_real_imag(ary, attr='real')\n\n def resolve_imag(self, ary):\n return self._resolve_real_imag(ary, attr='imag')\n\n def _resolve_real_imag(self, ary, attr):\n if ary.dtype in types.complex_domain:\n return ary.copy(dtype=ary.dtype.underlying_float, layout='A')\n elif ary.dtype in types.number_domain:\n res = ary.copy(dtype=ary.dtype)\n if attr == 'imag':\n res = res.copy(readonly=True)\n return res\n else:\n msg = \"cannot access .{} of array of {}\"\n raise TypingError(msg.format(attr, ary.dtype))\n\n @bound_function(\"array.transpose\")\n def resolve_transpose(self, ary, args, kws):\n def sentry_shape_scalar(ty):\n if ty in types.number_domain:\n # Guard against non integer type\n if not isinstance(ty, types.Integer):\n raise TypeError(\"transpose() arg cannot be {0}\".format(ty))\n return True\n else:\n return False\n\n assert not kws\n if len(args) == 0:\n return signature(self.resolve_T(ary))\n\n if len(args) == 1:\n shape, = args\n\n if sentry_shape_scalar(shape):\n assert ary.ndim == 1\n return signature(ary, *args)\n\n shape = normalize_shape(shape)\n if shape is None:\n return\n\n assert ary.ndim == shape.count\n return signature(self.resolve_T(ary), shape)\n\n else:\n if any(not sentry_shape_scalar(a) for a in args):\n raise TypeError(\"transpose({0}) is not supported\".format(\n ', '.join(args)))\n assert ary.ndim == len(args)\n return signature(self.resolve_T(ary), *args)\n\n @bound_function(\"array.copy\")\n def resolve_copy(self, ary, args, kws):\n assert not args\n assert not kws\n retty = ary.copy(layout=\"C\", readonly=False)\n return signature(retty)\n\n @bound_function(\"array.item\")\n def resolve_item(self, ary, args, kws):\n assert not kws\n # We don't support explicit arguments as that's exactly equivalent\n # to regular indexing. The no-argument form is interesting to\n # allow some degree of genericity when writing functions.\n if not args:\n return signature(ary.dtype)\n\n @bound_function(\"array.itemset\")\n def resolve_itemset(self, ary, args, kws):\n assert not kws\n # We don't support explicit arguments as that's exactly equivalent\n # to regular indexing. 
The no-argument form is interesting to\n # allow some degree of genericity when writing functions.\n if len(args) == 1:\n return signature(types.none, ary.dtype)\n\n @bound_function(\"array.nonzero\")\n def resolve_nonzero(self, ary, args, kws):\n assert not args\n assert not kws\n # 0-dim arrays return one result array\n ndim = max(ary.ndim, 1)\n retty = types.UniTuple(types.Array(types.intp, 1, 'C'), ndim)\n return signature(retty)\n\n @bound_function(\"array.reshape\")\n def resolve_reshape(self, ary, args, kws):\n def sentry_shape_scalar(ty):\n if ty in types.number_domain:\n # Guard against non integer type\n if not isinstance(ty, types.Integer):\n raise TypeError(\"reshape() arg cannot be {0}\".format(ty))\n return True\n else:\n return False\n\n assert not kws\n if ary.layout not in 'CF':\n # only work for contiguous array\n raise TypeError(\"reshape() supports contiguous array only\")\n\n if len(args) == 1:\n # single arg\n shape, = args\n\n if sentry_shape_scalar(shape):\n ndim = 1\n else:\n shape = normalize_shape(shape)\n if shape is None:\n return\n ndim = shape.count\n retty = ary.copy(ndim=ndim)\n return signature(retty, shape)\n\n elif len(args) == 0:\n # no arg\n raise TypeError(\"reshape() take at least one arg\")\n\n else:\n # vararg case\n if any(not sentry_shape_scalar(a) for a in args):\n raise TypeError(\"reshape({0}) is not supported\".format(\n ', '.join(args)))\n\n retty = ary.copy(ndim=len(args))\n return signature(retty, *args)\n\n @bound_function(\"array.sort\")\n def resolve_sort(self, ary, args, kws):\n assert not args\n assert not kws\n if ary.ndim == 1:\n return signature(types.none)\n\n @bound_function(\"array.argsort\")\n def resolve_argsort(self, ary, args, kws):\n assert not args\n kwargs = dict(kws)\n kind = kwargs.pop('kind', types.Const('quicksort'))\n if kwargs:\n msg = \"Unsupported keywords: {!r}\"\n raise TypingError(msg.format([k for k in kwargs.keys()]))\n if ary.ndim == 1:\n def argsort_stub(kind='quicksort'):\n pass\n pysig = utils.pysignature(argsort_stub)\n sig = signature(types.Array(types.intp, 1, 'C'), kind).replace(pysig=pysig)\n return sig\n\n @bound_function(\"array.view\")\n def resolve_view(self, ary, args, kws):\n from .npydecl import _parse_dtype\n assert not kws\n dtype, = args\n dtype = _parse_dtype(dtype)\n if dtype is None:\n return\n retty = ary.copy(dtype=dtype)\n return signature(retty, *args)\n\n @bound_function(\"array.astype\")\n def resolve_astype(self, ary, args, kws):\n from .npydecl import _parse_dtype\n assert not kws\n dtype, = args\n dtype = _parse_dtype(dtype)\n if dtype is None:\n return\n if not self.context.can_convert(ary.dtype, dtype):\n raise TypeError(\"astype(%s) not supported on %s: \"\n \"cannot convert from %s to %s\"\n % (dtype, ary, ary.dtype, dtype))\n layout = ary.layout if ary.layout in 'CF' else 'C'\n retty = ary.copy(dtype=dtype, layout=layout)\n return signature(retty, *args)\n\n @bound_function(\"array.ravel\")\n def resolve_ravel(self, ary, args, kws):\n # Only support no argument version (default order='C')\n assert not kws\n assert not args\n return signature(ary.copy(ndim=1, layout='C'))\n\n @bound_function(\"array.flatten\")\n def resolve_flatten(self, ary, args, kws):\n # Only support no argument version (default order='C')\n assert not kws\n assert not args\n return signature(ary.copy(ndim=1, layout='C'))\n\n @bound_function(\"array.take\")\n def resolve_take(self, ary, args, kws):\n assert not kws\n argty, = args\n if isinstance(argty, types.Integer):\n sig = signature(ary.dtype, 
*args)\n elif isinstance(argty, types.Array):\n sig = signature(argty.copy(layout='C', dtype=ary.dtype), *args)\n elif isinstance(argty, types.List): # 1d lists only\n sig = signature(types.Array(ary.dtype, 1, 'C'), *args)\n elif isinstance(argty, types.BaseTuple):\n sig = signature(types.Array(ary.dtype, np.ndim(argty), 'C'), *args)\n else:\n raise TypeError(\"take(%s) not supported for %s\" % argty)\n return sig\n\n def generic_resolve(self, ary, attr):\n # Resolution of other attributes, for record arrays\n if isinstance(ary.dtype, types.Record):\n if attr in ary.dtype.fields:\n return ary.copy(dtype=ary.dtype.typeof(attr), layout='A')\n\n\n@infer_getattr\nclass DTypeAttr(AttributeTemplate):\n key = types.DType\n\n def resolve_type(self, ary):\n # Wrap the numeric type in NumberClass\n return types.NumberClass(ary.dtype)\n\n def resolve_kind(self, ary):\n if isinstance(ary.key, types.scalars.Float):\n val = 'f'\n elif isinstance(ary.key, types.scalars.Integer):\n val = 'i'\n else:\n return None # other types not supported yet\n return types.Const(val)\n\n@infer\nclass StaticGetItemArray(AbstractTemplate):\n key = \"static_getitem\"\n\n def generic(self, args, kws):\n # Resolution of members for record and structured arrays\n ary, idx = args\n if (isinstance(ary, types.Array) and isinstance(idx, str) and\n isinstance(ary.dtype, types.Record)):\n if idx in ary.dtype.fields:\n return ary.copy(dtype=ary.dtype.typeof(idx), layout='A')\n\n\n@infer_getattr\nclass RecordAttribute(AttributeTemplate):\n key = types.Record\n\n def generic_resolve(self, record, attr):\n ret = record.typeof(attr)\n assert ret\n return ret\n\n@infer\nclass StaticGetItemRecord(AbstractTemplate):\n key = \"static_getitem\"\n\n def generic(self, args, kws):\n # Resolution of members for records\n record, idx = args\n if isinstance(record, types.Record) and isinstance(idx, str):\n ret = record.typeof(idx)\n assert ret\n return ret\n\n@infer\nclass StaticSetItemRecord(AbstractTemplate):\n key = \"static_setitem\"\n\n def generic(self, args, kws):\n # Resolution of members for record and structured arrays\n record, idx, value = args\n if isinstance(record, types.Record) and isinstance(idx, str):\n expectedty = record.typeof(idx)\n if self.context.can_convert(value, expectedty) is not None:\n return signature(types.void, record, types.Const(idx), value)\n\n\n@infer_getattr\nclass ArrayCTypesAttribute(AttributeTemplate):\n key = types.ArrayCTypes\n\n def resolve_data(self, ctinfo):\n return types.uintp\n\n\n@infer_getattr\nclass ArrayFlagsAttribute(AttributeTemplate):\n key = types.ArrayFlags\n\n def resolve_contiguous(self, ctflags):\n return types.boolean\n\n def resolve_c_contiguous(self, ctflags):\n return types.boolean\n\n def resolve_f_contiguous(self, ctflags):\n return types.boolean\n\n\n@infer_getattr\nclass NestedArrayAttribute(ArrayAttribute):\n key = types.NestedArray\n\n\ndef _expand_integer(ty):\n \"\"\"\n If *ty* is an integer, expand it to a machine int (like Numpy).\n \"\"\"\n if isinstance(ty, types.Integer):\n if ty.signed:\n return max(types.intp, ty)\n else:\n return max(types.uintp, ty)\n elif isinstance(ty, types.Boolean):\n return types.intp\n else:\n return ty\n\ndef generic_homog(self, args, kws):\n assert not args\n assert not kws\n return signature(self.this.dtype, recvr=self.this)\n\ndef generic_expand(self, args, kws):\n assert not args\n assert not kws\n return signature(_expand_integer(self.this.dtype), recvr=self.this)\n\ndef sum_expand(self, args, kws):\n \"\"\"\n sum can be called with or 
without an axis parameter.\n \"\"\"\n pysig = None\n if kws:\n def sum_stub(axis):\n pass\n pysig = utils.pysignature(sum_stub)\n # rewrite args\n args = list(args) + [kws['axis']]\n kws = None\n args_len = len(args)\n assert args_len <= 1\n if args_len == 0:\n # No axis parameter so the return type of the summation is a scalar\n # of the type of the array.\n out = signature(_expand_integer(self.this.dtype), *args,\n recvr=self.this)\n else:\n # There is an axis paramter so the return type of this summation is\n # an array of dimension one less than the input array.\n return_type = types.Array(dtype=_expand_integer(self.this.dtype),\n ndim=self.this.ndim-1, layout='C')\n out = signature(return_type, *args, recvr=self.this)\n return out.replace(pysig=pysig)\n\ndef generic_expand_cumulative(self, args, kws):\n assert not args\n assert not kws\n assert isinstance(self.this, types.Array)\n return_type = types.Array(dtype=_expand_integer(self.this.dtype),\n ndim=1, layout='C')\n return signature(return_type, recvr=self.this)\n\ndef generic_hetero_real(self, args, kws):\n assert not args\n assert not kws\n if isinstance(self.this.dtype, (types.Integer, types.Boolean)):\n return signature(types.float64, recvr=self.this)\n return signature(self.this.dtype, recvr=self.this)\n\ndef generic_hetero_always_real(self, args, kws):\n assert not args\n assert not kws\n if isinstance(self.this.dtype, (types.Integer, types.Boolean)):\n return signature(types.float64, recvr=self.this)\n if isinstance(self.this.dtype, types.Complex):\n return signature(self.this.dtype.underlying_float, recvr=self.this)\n return signature(self.this.dtype, recvr=self.this)\n\ndef generic_index(self, args, kws):\n assert not args\n assert not kws\n return signature(types.intp, recvr=self.this)\n\ndef install_array_method(name, generic, support_literals=False):\n my_attr = {\"key\": \"array.\" + name, \"generic\": generic}\n temp_class = type(\"Array_\" + name, (AbstractTemplate,), my_attr)\n if support_literals:\n temp_class.support_literals = support_literals\n def array_attribute_attachment(self, ary):\n return types.BoundFunction(temp_class, ary)\n\n setattr(ArrayAttribute, \"resolve_\" + name, array_attribute_attachment)\n\n# Functions that return the same type as the array\nfor fname in [\"min\", \"max\"]:\n install_array_method(fname, generic_homog)\n\n# Functions that return a machine-width type, to avoid overflows\ninstall_array_method(\"prod\", generic_expand)\ninstall_array_method(\"sum\", sum_expand, support_literals=True)\n\n# Functions that return a machine-width type, to avoid overflows\nfor fname in [\"cumsum\", \"cumprod\"]:\n install_array_method(fname, generic_expand_cumulative)\n\n# Functions that require integer arrays get promoted to float64 return\nfor fName in [\"mean\"]:\n install_array_method(fName, generic_hetero_real)\n\n# var and std by definition return in real space and int arrays\n# get promoted to float64 return\nfor fName in [\"var\", \"std\"]:\n install_array_method(fName, generic_hetero_always_real)\n\n\n# Functions that return an index (intp)\ninstall_array_method(\"argmin\", generic_index)\ninstall_array_method(\"argmax\", generic_index)\n",
"path": "numba/typing/arraydecl.py"
}
] | diff --git a/numba/tests/test_fancy_indexing.py b/numba/tests/test_fancy_indexing.py
index fd8be585bde..efa037ba5ac 100644
--- a/numba/tests/test_fancy_indexing.py
+++ b/numba/tests/test_fancy_indexing.py
@@ -117,6 +117,16 @@ def test_getitem_tuple_and_ellipsis(self):
self.check_getitem_indices(arr, indices)
+ def test_ellipsis_getsetitem(self):
+ # See https://github.com/numba/numba/issues/3225
+ @jit(nopython=True)
+ def foo(arr, v):
+ arr[..., 0] = arr[..., 1]
+
+ arr = np.arange(2)
+ foo(arr, 1)
+ self.assertEqual(arr[0], arr[1])
+
@tag('important')
def test_getitem_array(self):
# Test advanced indexing with a single array index
@@ -187,7 +197,7 @@ def check(arr, ind):
# 3. nd array index
# 4. reflected list
# 5. tuples
-
+
test_indices = []
test_indices.append(1)
test_indices.append(np.array([1, 5, 1, 11, 3]))
@@ -195,7 +205,7 @@ def check(arr, ind):
test_indices.append([1, 5, 1, 11, 3])
test_indices.append((1, 5, 1))
test_indices.append(((1, 5, 1), (11, 3, 2)))
-
+
for dt in [np.int64, np.complex128]:
A = np.arange(12, dtype=dt).reshape((4, 3))
for ind in test_indices:
@@ -212,7 +222,7 @@ def check(arr, ind):
# check float indexing raises
with self.assertRaises(TypingError):
cfunc(A, [1.7])
-
+
# check unsupported arg raises
with self.assertRaises(TypingError):
take_kws = jit(nopython=True)(np_take_kws)
diff --git a/numba/tests/test_indexing.py b/numba/tests/test_indexing.py
index 3b075bbd6f9..813d07e5aa1 100644
--- a/numba/tests/test_indexing.py
+++ b/numba/tests/test_indexing.py
@@ -679,7 +679,7 @@ def run(a):
cfunc = compile_func(a)
for i, j in itertools.product(bounds, bounds):
x = cfunc(a, i, j)
- self.assertPreciseEqual(pyfunc(a, i, j), cfunc(a, i, j))
+ np.testing.assert_equal(pyfunc(a, i, j), cfunc(a, i, j))
run(np.arange(16, dtype='i4').reshape(4, 4))
run(np.arange(27, dtype='i4').reshape(3, 3, 3))
diff --git a/numba/typing/arraydecl.py b/numba/typing/arraydecl.py
index 6119200fe0c..ba3cd2ec6d9 100644
--- a/numba/typing/arraydecl.py
+++ b/numba/typing/arraydecl.py
@@ -144,7 +144,11 @@ def check_contiguity(outer_indices):
elif not check_contiguity(right_indices[::-1]):
layout = 'A'
- res = ary.copy(ndim=ndim, layout=layout)
+ if ndim == 0:
+ # Implicitly convert to a scalar if the output ndim==0
+ res = ary.dtype
+ else:
+ res = ary.copy(ndim=ndim, layout=layout)
# Re-wrap indices
if isinstance(idx, types.BaseTuple):
Ellipsis indexing and assignment not working when the needed number of `:` is zero
I was trying to index with `...` on both sides of an assignment so that the same function works for inputs with 1, 2, or N dimensions.
Here is the code for the MCVE (using `numba` 0.39.0):
```python
import numpy as np
import numba
def func(A, B, indices):
rv = A.copy()
for i in range(indices.size):
index = indices[i]
rv[..., index] = B[..., index]
return rv
jitted = numba.njit(func)
A = np.ones((3, 5))
B = 2 * np.ones((3, 5))
indices = np.array([0, 2])
jitted(A[0], B[0], indices) # <-- raises. traceback below
```
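For context (an editorial note, not part of the original report): when the inputs are 1-D, the ellipsis expands to zero `:` slots, so the assignment inside the loop has the same effect as plain integer indexing. A minimal NumPy-only sketch of the behaviour the jitted function is expected to reproduce, reusing the names from the MCVE above:
```python
import numpy as np

rv = np.ones(5)
B = 2 * np.ones(5)
index = 2

# With a 1-D array the ellipsis expands to zero ':' slots, so this
# assignment has the same effect as `rv[index] = B[index]`.
rv[..., index] = B[..., index]
assert rv[index] == 2.0
```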
Let's compare results using `numpy` and `numba`:
```python
In [2]: func(A, B, indices)
Out[2]:
array([[2., 1., 2., 1., 1.],
[2., 1., 2., 1., 1.],
[2., 1., 2., 1., 1.]])
In [3]: func(A[0], B[0], indices)
Out[3]: array([2., 1., 2., 1., 1.])
In [4]: jitted(A, B, indices)
Out[4]:
array([[2., 1., 2., 1., 1.],
[2., 1., 2., 1., 1.],
[2., 1., 2., 1., 1.]])
In [5]: jitted(A[0], B[0], indices) # <-- raises. traceback below
```
Traceback:
<details>
```python-traceback
---------------------------------------------------------------------------
NotImplementedError Traceback (most recent call last)
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/targets/base.py in cast(self, builder, val, fromty, toty)
674 try:
--> 675 impl = self._casts.find((fromty, toty))
676 return impl(self, builder, fromty, toty, val)
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/targets/base.py in find(self, sig)
47 if out is None:
---> 48 out = self._find(sig)
49 self._cache[sig] = out
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/targets/base.py in _find(self, sig)
56 else:
---> 57 raise NotImplementedError(self, sig)
58
NotImplementedError: (<numba.targets.base.OverloadSelector object at 0x1059269e8>, (array(float64, 0d, C), float64))
During handling of the above exception, another exception occurred:
NotImplementedError Traceback (most recent call last)
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)
576 try:
--> 577 yield
578 except NumbaError as e:
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/lowering.py in lower_block(self, block)
253 loc=self.loc, errcls_=defaulterrcls):
--> 254 self.lower_inst(inst)
255
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/lowering.py in lower_inst(self, inst)
357 assert signature is not None
--> 358 return self.lower_setitem(inst.target, inst.index, inst.value, signature)
359
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/lowering.py in lower_setitem(self, target_var, index_var, value_var, signature)
429
--> 430 return impl(self.builder, (target, index, value))
431
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/targets/base.py in __call__(self, builder, args)
1078 def __call__(self, builder, args):
-> 1079 return self._imp(self._context, builder, self._sig, args)
1080
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/targets/arrayobj.py in setitem_array(context, builder, sig, args)
481 # Store source value the given location
--> 482 val = context.cast(builder, val, valty, aryty.dtype)
483 store_item(context, builder, aryty, val, dataptr)
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/targets/base.py in cast(self, builder, val, fromty, toty)
678 raise NotImplementedError(
--> 679 "Cannot cast %s to %s: %s" % (fromty, toty, val))
680
NotImplementedError: Cannot cast array(float64, 0d, C) to float64: %".417" = load {i8*, i8*, i64, i64, double*, [0 x i64], [0 x i64]}, {i8*, i8*, i64, i64, double*, [0 x i64], [0 x i64]}* %"$22.9"
During handling of the above exception, another exception occurred:
LoweringError Traceback (most recent call last)
<ipython-input-6-e6ce0775290a> in <module>()
----> 1 jitted(A[0], B[0], indices)
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
366 e.patch_message(''.join(e.args) + help_msg)
367 # ignore the FULL_TRACEBACKS config, this needs reporting!
--> 368 raise e
369
370 def inspect_llvm(self, signature=None):
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/dispatcher.py in _compile_for_args(self, *args, **kws)
323 argtypes.append(self.typeof_pyval(a))
324 try:
--> 325 return self.compile(tuple(argtypes))
326 except errors.TypingError as e:
327 # Intercept typing error that may be due to an argument
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/dispatcher.py in compile(self, sig)
651
652 self._cache_misses[sig] += 1
--> 653 cres = self._compiler.compile(args, return_type)
654 self.add_overload(cres)
655 self._cache.save_overload(sig, cres)
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/dispatcher.py in compile(self, args, return_type)
81 args=args, return_type=return_type,
82 flags=flags, locals=self.locals,
---> 83 pipeline_class=self.pipeline_class)
84 # Check typing error if object mode is used
85 if cres.typing_error is not None and not flags.enable_pyobject:
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in compile_extra(typingctx, targetctx, func, args, return_type, flags, locals, library, pipeline_class)
871 pipeline = pipeline_class(typingctx, targetctx, library,
872 args, return_type, flags, locals)
--> 873 return pipeline.compile_extra(func)
874
875
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in compile_extra(self, func)
365 self.lifted = ()
366 self.lifted_from = None
--> 367 return self._compile_bytecode()
368
369 def compile_ir(self, func_ir, lifted=(), lifted_from=None):
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in _compile_bytecode(self)
802 """
803 assert self.func_ir is None
--> 804 return self._compile_core()
805
806 def _compile_ir(self):
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in _compile_core(self)
789 self.define_pipelines(pm)
790 pm.finalize()
--> 791 res = pm.run(self.status)
792 if res is not None:
793 # Early pipeline completion
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in run(self, status)
251 # No more fallback pipelines?
252 if is_final_pipeline:
--> 253 raise patched_exception
254 # Go to next fallback pipeline
255 else:
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in run(self, status)
243 try:
244 event(stage_name)
--> 245 stage()
246 except _EarlyPipelineCompletion as e:
247 return e.result
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in stage_nopython_backend(self)
676 """
677 lowerfn = self.backend_nopython_mode
--> 678 self._backend(lowerfn, objectmode=False)
679
680 def stage_compile_interp_mode(self):
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in _backend(self, lowerfn, objectmode)
626 self.library.enable_object_caching()
627
--> 628 lowered = lowerfn()
629 signature = typing.signature(self.return_type, *self.args)
630 self.cr = compile_result(
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in backend_nopython_mode(self)
613 self.return_type,
614 self.calltypes,
--> 615 self.flags)
616
617 def _backend(self, lowerfn, objectmode):
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/compiler.py in native_lowering_stage(targetctx, library, interp, typemap, restype, calltypes, flags)
990
991 lower = lowering.Lower(targetctx, library, fndesc, interp)
--> 992 lower.lower()
993 if not flags.no_cpython_wrapper:
994 lower.create_cpython_wrapper(flags.release_gil)
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/lowering.py in lower(self)
171 if self.generator_info is None:
172 self.genlower = None
--> 173 self.lower_normal_function(self.fndesc)
174 else:
175 self.genlower = self.GeneratorLower(self)
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/lowering.py in lower_normal_function(self, fndesc)
212 # Init argument values
213 self.extract_function_arguments()
--> 214 entry_block_tail = self.lower_function_body()
215
216 # Close tail of entry block
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/lowering.py in lower_function_body(self)
237 bb = self.blkmap[offset]
238 self.builder.position_at_end(bb)
--> 239 self.lower_block(block)
240
241 self.post_lower()
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/lowering.py in lower_block(self, block)
252 with new_error_context('lowering "{inst}" at {loc}', inst=inst,
253 loc=self.loc, errcls_=defaulterrcls):
--> 254 self.lower_inst(inst)
255
256 def create_cpython_wrapper(self, release_gil=False):
~/miniconda3/envs/numba3/lib/python3.7/contextlib.py in __exit__(self, type, value, traceback)
128 value = type()
129 try:
--> 130 self.gen.throw(type, value, traceback)
131 except StopIteration as exc:
132 # Suppress StopIteration *unless* it's the same exception that
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/errors.py in new_error_context(fmt_, *args, **kwargs)
583 from numba import config
584 tb = sys.exc_info()[2] if config.FULL_TRACEBACKS else None
--> 585 six.reraise(type(newerr), newerr, tb)
586
587
~/miniconda3/envs/numba3/lib/python3.7/site-packages/numba/six.py in reraise(tp, value, tb)
657 if value.__traceback__ is not tb:
658 raise value.with_traceback(tb)
--> 659 raise value
660
661 else:
LoweringError: Failed at nopython (nopython mode backend)
Cannot cast array(float64, 0d, C) to float64: %".417" = load {i8*, i8*, i64, i64, double*, [0 x i64], [0 x i64]}, {i8*, i8*, i64, i64, double*, [0 x i64], [0 x i64]}* %"$22.9"
File "<ipython-input-1-f6cc8d5fb861>", line 8:
def func(A, B, indices):
<source elided>
index = indices[i]
rv[..., index] = B[..., index]
^
[1] During: lowering "rv[$22.13] = $22.9" at <ipython-input-1-f6cc8d5fb861> (8)
-------------------------------------------------------------------------------
This should not have happened, a problem has occurred in Numba's internals.
Please report the error message and traceback, along with a minimal reproducer
at: https://github.com/numba/numba/issues/new
If more help is needed please feel free to speak to the Numba core developers
directly at: https://gitter.im/numba/numba
Thanks in advance for your help in improving Numba!
```
</details>
|
Theano__Theano-4343 | [
{
"content": "\"\"\"\nAbstract conv interface\n\"\"\"\nfrom __future__ import absolute_import, print_function, division\n\nimport logging\nfrom six import reraise, integer_types\nimport sys\n\nimport theano\n\nfrom theano.tensor import as_tensor_variable, patternbroadcast\nfrom theano.tensor import get_scalar_constant_value, NotScalarConstantError\nfrom theano.gof import Apply, Op\n\nfrom six.moves import xrange\n\nimport warnings\nimport numpy\nimport numpy as np\n\ntry:\n from scipy.signal.signaltools import _valfrommode, _bvalfromboundary\n from scipy.signal.sigtools import _convolve2d\n imported_scipy_signal = True\nexcept ImportError:\n imported_scipy_signal = False\n\n\n__docformat__ = \"restructuredtext en\"\n_logger = logging.getLogger(\"theano.tensor.nnet.abstract_conv\")\n\n\ndef get_conv_output_shape(image_shape, kernel_shape,\n border_mode, subsample):\n \"\"\"\n This function compute the output shape of convolution operation.\n\n Parameters\n ----------\n image_shape: tuple of int (symbolic or numeric) corresponding to the input\n image shape. Its four (or five) element must correspond respectively\n to: batch size, number of input channels, height and width (and\n possibly depth) of the image. None where undefined.\n kernel_shape: tuple of int (symbolic or numeric) corresponding to the\n kernel shape. Its four (or five) elements must correspond respectively\n to: number of output channels, number of input channels, height and\n width (and possibly depth) of the kernel. None where undefined.\n border_mode: string, int (symbolic or numeric) or tuple of int (symbolic\n or numeric). If it is a string, it must be 'valid', 'half' or 'full'.\n If it is a tuple, its two (or three) elements respectively correspond\n to the padding on height and width (and possibly depth) axis.\n subsample: tuple of int (symbolic or numeric). Its or three elements\n espectively correspond to the subsampling on height and width (and\n possibly depth) axis.\n\n Returns\n -------\n output_shape: tuple of int corresponding to the output image shape. Its\n four element must correspond respectively to: batch size, number of\n output channels, height and width of the image. None where undefined.\n\n \"\"\"\n bsize, imshp = image_shape[0], image_shape[2:]\n nkern, kshp = kernel_shape[0], kernel_shape[2:]\n if isinstance(border_mode, tuple):\n out_shp = tuple(get_conv_shape_1axis(\n imshp[i], kshp[i], border_mode[i], subsample[i])\n for i in range(len(subsample)))\n else:\n out_shp = tuple(get_conv_shape_1axis(\n imshp[i], kshp[i], border_mode, subsample[i])\n for i in range(len(subsample)))\n return (bsize, nkern) + out_shp\n\n\ndef get_conv_shape_1axis(image_shape, kernel_shape,\n border_mode, subsample):\n \"\"\"\n This function compute the output shape of convolution operation.\n\n Parameters\n ----------\n image_shape: int or None. Corresponds to the input image shape on a\n given axis. None if undefined.\n kernel_shape: int or None. Corresponds to the kernel shape on a given\n axis. None if undefined.\n border_mode: string or int. If it is a string, it must be\n 'valid', 'half' or 'full'. If it is an integer, it must correspond to\n the padding on the considered axis.\n subsample: int. It must correspond to the subsampling on the\n considered axis.\n\n Returns\n -------\n out_shp: int corresponding to the output image shape on the\n considered axis. 
None if undefined.\n\n \"\"\"\n if None in [image_shape, kernel_shape, border_mode, subsample]:\n return None\n if border_mode == \"half\":\n pad = kernel_shape // 2\n elif border_mode == \"full\":\n pad = kernel_shape - 1\n elif border_mode == \"valid\":\n pad = 0\n else:\n pad = border_mode\n if pad < 0:\n raise ValueError(\"border_mode must be >= 0\")\n out_shp = (image_shape + 2 * pad - kernel_shape) // subsample + 1\n\n return out_shp\n\n\ndef conv2d(input,\n filters,\n input_shape=None,\n filter_shape=None,\n border_mode='valid',\n subsample=(1, 1),\n filter_flip=True):\n \"\"\"This function will build the symbolic graph for convolving a mini-batch of a\n stack of 2D inputs with a set of 2D filters. The implementation is modelled\n after Convolutional Neural Networks (CNN).\n\n Refer to :func:`nnet.conv2d <theano.tensor.nnet.conv2d>` for a more detailed documentation.\n \"\"\"\n\n input = as_tensor_variable(input)\n filters = as_tensor_variable(filters)\n conv_op = AbstractConv2d(imshp=input_shape,\n kshp=filter_shape,\n border_mode=border_mode,\n subsample=subsample,\n filter_flip=filter_flip)\n return conv_op(input, filters)\n\n\ndef conv2d_grad_wrt_inputs(output_grad,\n filters,\n input_shape,\n filter_shape=None,\n border_mode='valid',\n subsample=(1, 1),\n filter_flip=True):\n \"\"\"Compute conv output gradient w.r.t its inputs\n\n This function builds the symbolic graph for getting the\n gradient of the output of a convolution (namely output_grad)\n w.r.t the input of the convolution, given a set of 2D filters\n used by the convolution, such that the output_grad is upsampled\n to the input_shape.\n\n Parameters\n ----------\n output_grad : symbolic 4D tensor\n mini-batch of feature map stacks, of shape (batch size, input\n channels, input rows, input columns). This is the tensor that\n will be upsampled or the output gradient of the convolution\n whose gradient will be taken with respect to the input of the\n convolution.\n filters : symbolic 4D tensor\n set of filters used in CNN layer of shape (output channels,\n input channels, filter rows, filter columns). See the\n optional parameter ``filter_shape``.\n input_shape : [None/int/Constant] * 2 + [Tensor/int/Constant] * 2\n The shape of the input (upsampled) parameter.\n A tuple/list of len 4, with the first two dimensions\n being None or int or Constant and the last two dimensions being\n Tensor or int or Constant.\n Not Optional, since given the output_grad shape\n and the subsample values, multiple input_shape may be\n plausible.\n filter_shape : None or [None/int/Constant] * 4\n The shape of the filters parameter. None or a tuple/list of len 4.\n Optional, possibly used to choose an optimal implementation.\n You can give ``None`` for any element of the list to specify that\n this element is not known at compile time.\n border_mode : str, int or tuple of two int\n Either of the following:\n\n ``'valid'``\n apply filter wherever it completely overlaps with the\n input. Generates output of shape: input shape - filter\n shape + 1\n\n ``'full'``\n apply filter wherever it partly overlaps with the input.\n Generates output of shape: input shape + filter shape - 1\n\n ``'half'``\n pad input with a symmetric border of ``filter rows // 2``\n rows and ``filter columns // 2`` columns, then perform a\n valid convolution. For filters with an odd number of rows\n and columns, this leads to the output shape being equal to\n the input shape. 
It is known as 'same' elsewhere.\n\n ``int``\n pad input with a symmetric border of zeros of the given\n width, then perform a valid convolution.\n\n ``(int1, int2)``\n pad input with a symmetric border of ``int1`` rows and\n ``int2`` columns, then perform a valid convolution.\n\n subsample : tuple of len 2\n The subsampling used in the forward pass. Also called strides\n elsewhere.\n filter_flip : bool\n If ``True``, will flip the filter rows and columns before\n sliding them over the input. This operation is normally\n referred to as a convolution, and this is the default. If\n ``False``, the filters are not flipped and the operation is\n referred to as a cross-correlation.\n\n Returns\n -------\n symbolic 4D tensor\n set of feature maps generated by convolutional layer. Tensor\n is of shape (batch size, output channels, output rows, output\n columns)\n\n Notes\n -----\n\n :note: If CuDNN is available, it will be used on the\n GPU. Otherwise, it is the *CorrMM* convolution that will be used\n \"caffe style convolution\".\n\n :note: This is only supported in Theano 0.8 or the development\n version until it is released.\n\n \"\"\"\n\n filters = as_tensor_variable(filters)\n output_grad = as_tensor_variable(output_grad)\n\n # checking the type of input_shape\n for dim in [0, 1]:\n assert isinstance(input_shape[dim], (theano.tensor.TensorConstant,\n integer_types, type(None)))\n for dim in [2, 3]:\n assert isinstance(input_shape[dim], (theano.tensor.TensorVariable,\n theano.tensor.TensorConstant,\n integer_types))\n\n # checking the type of filter_shape\n if filter_shape is not None:\n for dim in [0, 1, 2, 3]:\n assert isinstance(filter_shape[dim], (theano.tensor.TensorConstant,\n integer_types, type(None)))\n\n # setting the last two dimensions of input_shape to None, if\n # the type of these dimensions is TensorVariable.\n numerical_input_shape = list(input_shape)\n for dim in [2, 3]:\n if isinstance(input_shape[dim], theano.tensor.TensorVariable):\n numerical_input_shape[dim] = None\n\n grad_input_op = AbstractConv2d_gradInputs(imshp=numerical_input_shape,\n kshp=filter_shape,\n border_mode=border_mode,\n subsample=subsample,\n filter_flip=filter_flip)\n\n return grad_input_op(filters, output_grad, input_shape[-2:])\n\n\ndef conv2d_grad_wrt_weights(input,\n output_grad,\n filter_shape,\n input_shape=None,\n border_mode='valid',\n subsample=(1, 1),\n filter_flip=True):\n \"\"\"Compute conv output gradient w.r.t its weights\n\n This function will build the symbolic graph for getting the\n gradient of the output of a convolution (output_grad) w.r.t its wights.\n\n Parameters\n ----------\n input : symbolic 4D tensor\n mini-batch of feature map stacks, of shape (batch size, input\n channels, input rows, input columns). This is the input of\n the convolution in the forward pass.\n output_grad : symbolic 4D tensor\n mini-batch of feature map stacks, of shape (batch size, input\n channels, input rows, input columns). This is the gradient of\n the output of convolution.\n filter_shape : [None/int/Constant] * 2 + [Tensor/int/Constant] * 2\n The shape of the filter parameter. A tuple/list of len 4, with the\n first two dimensions being None or int or Constant and the last two\n dimensions being Tensor or int or Constant.\n Not Optional, since given the output_grad shape and\n the input_shape, multiple filter_shape may be plausible.\n input_shape : None or [None/int/Constant] * 4\n The shape of the input parameter. 
None or a tuple/list of len 4.\n Optional, possibly used to choose an optimal implementation.\n You can give ``None`` for any element of the list to specify\n that this element is not known at compile time.\n border_mode : str, int or tuple of two ints\n Either of the following:\n\n ``'valid'``\n apply filter wherever it completely overlaps with the\n input. Generates output of shape: input shape - filter\n shape + 1\n\n ``'full'``\n apply filter wherever it partly overlaps with the input.\n Generates output of shape: input shape + filter shape - 1\n\n ``'half'``\n pad input with a symmetric border of ``filter rows // 2``\n rows and ``filter columns // 2`` columns, then perform a\n valid convolution. For filters with an odd number of rows\n and columns, this leads to the output shape being equal to\n the input shape. It is known as 'same' elsewhere.\n\n ``int``\n pad input with a symmetric border of zeros of the given\n width, then perform a valid convolution.\n\n ``(int1, int2)``\n pad input with a symmetric border of ``int1`` rows and\n ``int2`` columns, then perform a valid convolution.\n\n subsample : tuple of len 2\n The subsampling used in the forward pass of the convolutional\n operation. Also called strides elsewhere.\n filter_flip : bool\n If ``True``, will flip the filter rows and columns before\n sliding them over the input. This operation is normally\n referred to as a convolution, and this is the default. If\n ``False``, the filters are not flipped and the operation is\n referred to as a cross-correlation.\n\n Returns\n -------\n symbolic 4D tensor\n set of feature maps generated by convolutional layer. Tensor\n is of shape (batch size, output channels, output rows, output\n columns)\n\n Notes\n -----\n\n :note: If CuDNN is available, it will be used on the\n GPU. 
Otherwise, it is the *CorrMM* convolution that will be used\n \"caffe style convolution\".\n\n :note: This is only supported in Theano 0.8 or the development\n version until it is released.\n\n \"\"\"\n\n input = as_tensor_variable(input)\n output_grad = as_tensor_variable(output_grad)\n\n # checking the type of filter_shape\n for dim in [0, 1]:\n assert isinstance(filter_shape[dim], (theano.tensor.TensorConstant,\n integer_types, type(None)))\n for dim in [2, 3]:\n assert isinstance(filter_shape[dim], (theano.tensor.TensorVariable,\n theano.tensor.TensorConstant,\n integer_types))\n\n # checking the type of input_shape\n if input_shape is not None:\n for dim in [0, 1, 2, 3]:\n assert isinstance(input_shape[dim], (theano.tensor.TensorConstant,\n integer_types, type(None)))\n\n # setting the last two dimensions of filter_shape to None, if\n # the type of these dimensions is TensorVariable.\n numerical_filter_shape = list(filter_shape)\n for dim in [2, 3]:\n if isinstance(filter_shape[dim], theano.tensor.TensorVariable):\n numerical_filter_shape[dim] = None\n\n gradWeight_op = AbstractConv2d_gradWeights(imshp=input_shape,\n kshp=numerical_filter_shape,\n border_mode=border_mode,\n subsample=subsample,\n filter_flip=filter_flip)\n\n return gradWeight_op(input, output_grad, filter_shape[:-2])\n\n\ndef bilinear_kernel_2D(ratio, normalize=True):\n \"\"\"Compute 2D kernel for bilinear upsampling\n\n This function builds the 2D kernel that can be used to upsample\n a tensor by the given ratio using bilinear interpolation.\n\n Parameters\n ----------\n ratio: int or Constant/Scalar Theano tensor of int* dtype\n the ratio by which an image will be upsampled by the returned filter\n in the 2D space.\n\n normalize: bool\n param normalize: indicates whether to normalize the kernel or not.\n Default is True.\n\n Returns\n -------\n symbolic 2D tensor\n the 2D kernels that can be applied to any given image to upsample it\n by the indicated ratio using bilinear interpolation in two dimensions.\n\n \"\"\"\n\n hkern = bilinear_kernel_1D(ratio=ratio, normalize=normalize).dimshuffle('x', 0)\n vkern = bilinear_kernel_1D(ratio=ratio, normalize=normalize).dimshuffle(0, 'x')\n kern = hkern * vkern\n return kern\n\n\ndef bilinear_kernel_1D(ratio, normalize=True):\n \"\"\"Compute 1D kernel for bilinear upsampling\n\n This function builds the 1D kernel that can be used to upsample\n a tensor by the given ratio using bilinear interpolation.\n\n Parameters\n ----------\n ratio: int or Constant/Scalar Theano tensor of int* dtype\n the ratio by which an image will be upsampled by the returned filter\n in the 2D space.\n\n normalize: bool\n param normalize: indicates whether to normalize the kernel or not.\n Default is True.\n\n Returns\n -------\n symbolic 1D tensor\n the 1D kernels that can be applied to any given image to upsample it\n by the indicated ratio using bilinear interpolation in one dimension.\n\n \"\"\"\n\n T = theano.tensor\n half_kern = T.arange(1, ratio + 1, dtype=theano.config.floatX)\n kern = T.concatenate([half_kern, half_kern[-2::-1]])\n\n if normalize:\n kern /= ratio\n return kern\n\n\ndef bilinear_upsampling(input,\n ratio,\n batch_size=None,\n num_input_channels=None,\n use_1D_kernel=True):\n \"\"\"Compute bilinear upsampling\n\n This function will build the symbolic graph for upsampling\n a tensor by the given ratio using bilinear interpolation.\n\n Parameters\n ----------\n input: symbolic 4D tensor\n mini-batch of feature map stacks, of shape (batch size,\n input channels, input rows, input 
columns) that will be upsampled.\n\n ratio: int or Constant or Scalar Tensor of int* dtype\n the ratio by which the input is upsampled in the 2D space (row and\n col size).\n\n batch_size: None, int or Constant variable\n The size of the first dimension of the input variable.\n Optional, possibly used to choose an optimal implementation.\n batch_size will be used only if num_input_channels is not None.\n\n num_input_channels: None, int or Constant variable\n The size of the second dimension of the input variable.\n Optional, possibly used to choose an optimal implementation.\n num_input_channels will be used only if batch_size is not None.\n\n use_1D_kernel: bool\n if set to true, row and column will be upsampled seperately by 1D\n kernels, otherwise they are upsampled together using a 2D kernel. The\n final result is the same, only the speed can differ, given factors such\n as upsampling ratio.\n\n Returns\n -------\n symbolic 4D tensor\n set of feature maps generated by bilinear upsampling. Tensor\n is of shape (batch size, num_input_channels, input row size * ratio,\n input column size * ratio)\n\n Notes\n -----\n\n :note: The kernel used for bilinear interpolation is fixed (not learned).\n\n :note: When the upsampling ratio is even, the last row and column is\n repeated one extra time compared to the first row and column which makes\n the upsampled tensor asymmetrical on both sides. This does not happen when\n the upsampling ratio is odd.\n\n \"\"\"\n\n T = theano.tensor\n try:\n up_bs = batch_size * num_input_channels\n except TypeError:\n up_bs = None\n row, col = input.shape[2:]\n up_input = input.reshape((-1, 1, row, col))\n\n # concatenating the first and last row and column\n # first and last row\n concat_mat = T.concatenate((up_input[:, :, :1, :], up_input,\n up_input[:, :, -1:, :]), axis=2)\n # first and last col\n concat_mat = T.concatenate((concat_mat[:, :, :, :1], concat_mat,\n concat_mat[:, :, :, -1:]), axis=3)\n concat_col = col + 2\n\n pad = 2 * ratio - (ratio - 1) // 2 - 1\n\n if use_1D_kernel:\n kern = bilinear_kernel_1D(ratio=ratio, normalize=True)\n # upsampling rows\n upsampled_row = conv2d_grad_wrt_inputs(output_grad=concat_mat,\n filters=kern[np.newaxis,\n np.newaxis, :,\n np.newaxis],\n input_shape=(up_bs, 1,\n row * ratio,\n concat_col),\n filter_shape=(1, 1, None, 1),\n border_mode=(pad, 0),\n subsample=(ratio, 1),\n filter_flip=True)\n # upsampling cols\n upsampled_mat = conv2d_grad_wrt_inputs(output_grad=upsampled_row,\n filters=kern[np.newaxis,\n np.newaxis,\n np.newaxis, :],\n input_shape=(up_bs, 1,\n row * ratio,\n col * ratio),\n filter_shape=(1, 1, 1, None),\n border_mode=(0, pad),\n subsample=(1, ratio),\n filter_flip=True)\n else:\n kern = bilinear_kernel_2D(ratio=ratio, normalize=True)\n upsampled_mat = conv2d_grad_wrt_inputs(output_grad=concat_mat,\n filters=kern[np.newaxis,\n np.newaxis, :, :],\n input_shape=(up_bs, 1,\n row * ratio,\n col * ratio),\n filter_shape=(1, 1, None, None),\n border_mode=(pad, pad),\n subsample=(ratio, ratio),\n filter_flip=True)\n\n return upsampled_mat.reshape((batch_size, num_input_channels,\n row * ratio, col * ratio))\n\n\nclass BaseAbstractConv2d(Op):\n \"\"\"Base class for AbstractConv\n\n Define an abstract convolution op that will be replaced with the\n appropriate implementation\n\n Parameters\n ----------\n imshp: None, tuple/list of len 4 of int or Constant variable\n The shape of the input parameter.\n Optional, possibly used to choose an optimal implementation.\n You can give ``None`` for any element of the 
list to specify that this\n element is not known at compile time.\n imshp is defined w.r.t the forward conv.\n\n kshp: None, tuple/list of len 4 of int or Constant variable\n The shape of the filters parameter.\n Optional, possibly used to choose an optimal implementation.\n You can give ``None`` for any element of the list to specify that this\n element is not known at compile time.\n kshp is defined w.r.t the forward conv.\n\n border_mode: str, int or tuple of two int\n Either of the following:\n\n ``'valid'``: apply filter wherever it completely overlaps with the\n input. Generates output of shape: input shape - filter shape + 1\n ``'full'``: apply filter wherever it partly overlaps with the input.\n Generates output of shape: input shape + filter shape - 1\n ``'half'``: pad input with a symmetric border of ``filter rows // 2``\n rows and ``filter columns // 2`` columns, then perform a valid\n convolution. For filters with an odd number of rows and columns, this\n leads to the output shape being equal to the input shape.\n ``int``: pad input with a symmetric border of zeros of the given\n width, then perform a valid convolution.\n ``(int1, int2)``: pad input with a symmetric border of ``int1`` rows\n and ``int2`` columns, then perform a valid convolution.\n\n subsample: tuple of len 2\n Factor by which to subsample the output.\n Also called strides elsewhere.\n\n filter_flip: bool\n If ``True``, will flip the filter rows and columns\n before sliding them over the input. This operation is normally referred\n to as a convolution, and this is the default. If ``False``, the filters\n are not flipped and the operation is referred to as a\n cross-correlation.\n\n \"\"\"\n check_broadcast = False\n __props__ = ('border_mode', 'subsample', 'filter_flip', 'imshp', 'kshp')\n\n def __init__(self,\n imshp=None, kshp=None,\n border_mode=\"valid\", subsample=(1, 1),\n filter_flip=True):\n\n if isinstance(border_mode, integer_types):\n border_mode = (border_mode, border_mode)\n if isinstance(border_mode, tuple):\n pad_h, pad_w = map(int, border_mode)\n border_mode = (pad_h, pad_w)\n if border_mode == (0, 0):\n border_mode = 'valid'\n if not ((isinstance(border_mode, tuple) and min(border_mode) >= 0) or\n border_mode in ('valid', 'full', 'half')):\n raise ValueError(\n 'invalid border_mode {}, which must be either '\n '\"valid\", \"full\", \"half\", an integer or a pair of'\n ' integers'.format(border_mode))\n\n self.imshp = tuple(imshp) if imshp else (None,) * 4\n for imshp_i in self.imshp:\n if imshp_i is not None:\n # Components of imshp should be constant or ints\n try:\n get_scalar_constant_value(imshp_i,\n only_process_constants=True)\n except NotScalarConstantError:\n reraise(ValueError,\n ValueError(\"imshp should be None or a tuple of \"\n \"constant int values\"),\n sys.exc_info()[2])\n self.kshp = tuple(kshp) if kshp else (None,) * 4\n for kshp_i in self.kshp:\n if kshp_i is not None:\n # Components of kshp should be constant or ints\n try:\n get_scalar_constant_value(kshp_i,\n only_process_constants=True)\n except NotScalarConstantError:\n reraise(ValueError,\n ValueError(\"kshp should be None or a tuple of \"\n \"constant int values\"),\n sys.exc_info()[2])\n self.border_mode = border_mode\n self.filter_flip = filter_flip\n\n if len(subsample) != 2:\n raise ValueError(\"subsample must have two elements\")\n self.subsample = tuple(subsample)\n\n def flops(self, inp, outp):\n \"\"\" Useful with the hack in profilemode to print the MFlops\"\"\"\n # if the output shape is correct, then this 
gives the correct\n # flops for any direction, sampling, padding, and border mode\n inputs, filters = inp\n outputs, = outp\n assert inputs[1] == filters[1]\n # nb mul and add by output pixel\n flops = filters[2] * filters[3] * 2\n # nb flops by output image\n flops *= outputs[2] * outputs[3]\n # nb patch multiplied\n flops *= inputs[1] * filters[0] * inputs[0]\n return flops\n\n def do_constant_folding(self, node):\n # Disable constant folding since there is no implementation.\n # This may change in the future.\n return False\n\n def conv2d(self, img, kern, mode=\"valid\"):\n \"\"\"\n Basic slow python implementatation for DebugMode\n \"\"\"\n\n if not imported_scipy_signal:\n raise NotImplementedError(\n \"AbstractConv perform requires the python package\"\n \" for scipy.signal to be installed.\")\n if not (mode in ('valid', 'full')):\n raise ValueError(\n 'invalid mode {}, which must be either '\n '\"valid\" or \"full\"'.format(mode))\n\n out_shape = get_conv_output_shape(img.shape, kern.shape, mode, [1, 1])\n out = numpy.zeros(out_shape, dtype=img.dtype)\n val = _valfrommode(mode)\n bval = _bvalfromboundary('fill')\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', numpy.ComplexWarning)\n for b in xrange(img.shape[0]):\n for n in xrange(kern.shape[0]):\n for im0 in xrange(img.shape[1]):\n # some cast generates a warning here\n out[b, n, ...] += _convolve2d(img[b, im0, ...],\n kern[n, im0, ...],\n 1, val, bval, 0)\n return out\n\n\nclass AbstractConv2d(BaseAbstractConv2d):\n \"\"\" Abstract Op for the forward convolution.\n Refer to :func:`BaseAbstractConv2d <theano.tensor.nnet.abstract_conv.BaseAbstractConv2d>`\n for a more detailed documentation.\n \"\"\"\n\n def __init__(self,\n imshp=None,\n kshp=None,\n border_mode=\"valid\",\n subsample=(1, 1),\n filter_flip=True):\n super(AbstractConv2d, self).__init__(imshp, kshp,\n border_mode, subsample,\n filter_flip)\n\n def make_node(self, img, kern):\n # Make sure both inputs are Variables with the same Type\n if not isinstance(img, theano.Variable):\n img = as_tensor_variable(img)\n if not isinstance(kern, theano.Variable):\n kern = as_tensor_variable(kern)\n ktype = img.type.clone(dtype=kern.dtype,\n broadcastable=kern.broadcastable)\n kern = ktype.filter_variable(kern)\n\n if img.type.ndim != 4:\n raise TypeError('img must be 4D tensor')\n if kern.type.ndim != 4:\n raise TypeError('kern must be 4D tensor')\n\n broadcastable = [img.broadcastable[0],\n kern.broadcastable[0],\n False, False]\n output = img.type.clone(broadcastable=broadcastable)()\n return Apply(self, [img, kern], [output])\n\n def perform(self, node, inp, out_):\n img, kern = inp\n img = numpy.asarray(img)\n kern = numpy.asarray(kern)\n o, = out_\n mode = self.border_mode\n\n if not ((isinstance(mode, tuple) and min(mode) >= 0) or\n mode in ('valid', 'full', 'half')):\n raise ValueError(\n 'invalid border_mode {}, which must be either '\n '\"valid\", \"full\", \"half\", an integer or a pair of'\n ' integers'.format(mode))\n\n if mode == \"full\":\n mode = (kern.shape[2] - 1, kern.shape[3] - 1)\n elif mode == \"half\":\n mode = (kern.shape[2] // 2, kern.shape[3] // 2)\n if isinstance(mode, tuple):\n pad_h, pad_w = map(int, mode)\n mode = \"valid\"\n new_img = numpy.zeros((img.shape[0], img.shape[1],\n img.shape[2] + 2 * pad_h,\n img.shape[3] + 2 * pad_w), dtype=img.dtype)\n new_img[:, :, pad_h:img.shape[2] + pad_h, pad_w:img.shape[3] + pad_w] = img\n img = new_img\n if not self.filter_flip:\n kern = kern[:, :, ::-1, ::-1]\n conv_out = self.conv2d(img, 
kern, mode=\"valid\")\n conv_out = conv_out[:, :, ::self.subsample[0], ::self.subsample[1]]\n\n o[0] = node.outputs[0].type.filter(conv_out)\n\n def R_op(self, inputs, eval_points):\n rval = None\n if eval_points[0] is not None:\n rval = self.make_node(eval_points[0], inputs[1]).outputs[0]\n if eval_points[1] is not None:\n if rval is None:\n rval = self.make_node(inputs[0], eval_points[1]).outputs[0]\n else:\n rval += self.make_node(inputs[0], eval_points[1]).outputs[0]\n return [rval]\n\n def grad(self, inp, grads):\n bottom, weights = inp\n top, = grads\n d_bottom = AbstractConv2d_gradInputs(self.imshp, self.kshp,\n self.border_mode,\n self.subsample,\n self.filter_flip)(\n weights, top, bottom.shape[-2:])\n d_weights = AbstractConv2d_gradWeights(self.imshp, self.kshp,\n self.border_mode,\n self.subsample,\n self.filter_flip)(\n\n bottom, top, weights.shape[-2:])\n\n # Make sure that the broadcastable pattern of the inputs is used\n # for the gradients, even if the grad opts are not able to infer\n # that the dimensions are broadcastable.\n # Also make sure that the gradient lives on the same device than\n # the corresponding input.\n d_bottom = patternbroadcast(d_bottom, bottom.broadcastable)\n d_bottom = bottom.type.filter_variable(d_bottom)\n d_weights = patternbroadcast(d_weights, weights.broadcastable)\n d_weights = weights.type.filter_variable(d_weights)\n return d_bottom, d_weights\n\n def infer_shape(self, node, input_shapes):\n imshp = input_shapes[0]\n kshp = input_shapes[1]\n\n # replace symbolic shapes with known constant shapes\n if self.imshp is not None:\n imshp = [imshp[i] if self.imshp[i] is None else self.imshp[i]\n for i in range(4)]\n if self.kshp is not None:\n kshp = [kshp[i] if self.kshp[i] is None else self.kshp[i]\n for i in range(4)]\n res = get_conv_output_shape(imshp, kshp, self.border_mode,\n self.subsample)\n return [res]\n\n\nclass AbstractConv2d_gradWeights(BaseAbstractConv2d):\n \"\"\"Gradient wrt. 
filters for `AbstractConv2d`.\n Refer to :func:`BaseAbstractConv2d <theano.tensor.nnet.abstract_conv.BaseAbstractConv2d>`\n for a more detailed documentation.\n\n :note: You will not want to use this directly, but rely on\n Theano's automatic differentiation or graph optimization to\n use it as needed.\n\n \"\"\"\n def __init__(self,\n imshp=None,\n kshp=None,\n border_mode=\"valid\",\n subsample=(1, 1),\n filter_flip=True):\n super(AbstractConv2d_gradWeights, self).__init__(imshp, kshp,\n border_mode,\n subsample,\n filter_flip)\n\n # Update shape/height_width\n def make_node(self, img, topgrad, shape):\n # Make sure both inputs are Variables with the same Type\n if not isinstance(img, theano.Variable):\n img = as_tensor_variable(img)\n if not isinstance(topgrad, theano.Variable):\n topgrad = as_tensor_variable(topgrad)\n gtype = img.type.clone(dtype=topgrad.dtype,\n broadcastable=topgrad.broadcastable)\n topgrad = gtype.filter_variable(topgrad)\n\n if img.type.ndim != 4:\n raise TypeError('img must be 4D tensor')\n if topgrad.type.ndim != 4:\n raise TypeError('topgrad must be 4D tensor')\n\n shape = as_tensor_variable(shape)\n broadcastable = [topgrad.broadcastable[1],\n img.broadcastable[1],\n False, False]\n output = img.type.clone(broadcastable=broadcastable)()\n return Apply(self, [img, topgrad, shape], [output])\n\n def perform(self, node, inp, out_):\n img, topgrad, shape = inp\n img = numpy.asarray(img)\n topgrad = numpy.asarray(topgrad)\n\n o, = out_\n\n mode = self.border_mode\n if not ((isinstance(mode, tuple) and min(mode) >= 0) or\n mode in ('valid', 'full', 'half')):\n raise ValueError(\n 'invalid border_mode {}, which must be either '\n '\"valid\", \"full\", \"half\", an integer or a pair of'\n ' integers'.format(mode))\n\n if mode == \"full\":\n mode = (shape[0] - 1, shape[1] - 1)\n elif mode == \"half\":\n mode = (shape[0] // 2, shape[1] // 2)\n if isinstance(mode, tuple):\n pad_h, pad_w = map(int, mode)\n mode = \"valid\"\n new_img = numpy.zeros((img.shape[0], img.shape[1],\n img.shape[2] + 2 * pad_h,\n img.shape[3] + 2 * pad_w), dtype=img.dtype)\n new_img[:, :, pad_h:img.shape[2] + pad_h, pad_w:img.shape[3] + pad_w] = img\n img = new_img\n\n if self.subsample[0] > 1 or self.subsample[1] > 1:\n new_shape = (topgrad.shape[0], topgrad.shape[1],\n img.shape[2] - shape[0] + 1,\n img.shape[3] - shape[1] + 1)\n new_topgrad = numpy.zeros((new_shape), dtype=topgrad.dtype)\n new_topgrad[:, :, ::self.subsample[0], ::self.subsample[1]] = topgrad\n topgrad = new_topgrad\n\n topgrad = topgrad.transpose(1, 0, 2, 3)[:, :, ::-1, ::-1]\n img = img.transpose(1, 0, 2, 3)\n kern = self.conv2d(img, topgrad, mode=\"valid\")\n if self.filter_flip:\n kern = kern.transpose(1, 0, 2, 3)[:, :, ::-1, ::-1]\n else:\n kern = kern.transpose(1, 0, 2, 3)\n o[0] = node.outputs[0].type.filter(kern)\n\n def grad(self, inp, grads):\n bottom, top = inp[:2]\n weights, = grads\n d_bottom = AbstractConv2d_gradInputs(self.imshp, self.kshp,\n self.border_mode,\n self.subsample,\n self.filter_flip)(\n weights,\n top,\n bottom.shape[-2:])\n d_top = AbstractConv2d(self.imshp,\n self.kshp,\n self.border_mode,\n self.subsample,\n self.filter_flip)(bottom, weights)\n # Make sure that the broadcastable pattern of the inputs is used\n # for the gradients, even if the grad opts are not able to infer\n # that the dimensions are broadcastable.\n # Also make sure that the gradient lives on the same device than\n # the corresponding input.\n d_bottom = patternbroadcast(d_bottom, bottom.broadcastable)\n d_bottom = 
bottom.type.filter_variable(d_bottom)\n d_top = patternbroadcast(d_top, top.broadcastable)\n d_top = top.type.filter_variable(d_top)\n\n d_height_width = (theano.gradient.DisconnectedType()(),)\n return (d_bottom, d_top) + d_height_width\n\n def connection_pattern(self, node):\n return [[1], [1], [0]] # no connection to height, width\n\n def infer_shape(self, node, input_shapes):\n # We use self.kshp (that was passed when creating the Op) if possible,\n # or fall back to the `shape` input of the node.\n # TODO: when there is no subsampling, try to infer the kernel shape\n # from the shapes of inputs.\n imshp = input_shapes[0]\n topshp = input_shapes[1]\n kshp = self.kshp[:] if self.kshp is not None else [None] * 4\n fallback_kshp = [topshp[1], imshp[1], node.inputs[2][0], node.inputs[2][1]]\n kshp = [fallback_kshp[i] if kshp[i] is None else kshp[i]\n for i in range(4)]\n return [kshp]\n\n\nclass AbstractConv2d_gradInputs(BaseAbstractConv2d):\n \"\"\"Gradient wrt. inputs for `AbstractConv2d`.\n Refer to :func:`BaseAbstractConv2d <theano.tensor.nnet.abstract_conv.BaseAbstractConv2d>`\n for a more detailed documentation.\n\n :note: You will not want to use this directly, but rely on\n Theano's automatic differentiation or graph optimization to\n use it as needed.\n\n \"\"\"\n\n def __init__(self,\n imshp=None,\n kshp=None,\n border_mode=\"valid\",\n subsample=(1, 1),\n filter_flip=True):\n super(AbstractConv2d_gradInputs, self).__init__(imshp, kshp,\n border_mode,\n subsample,\n filter_flip)\n\n # Update shape/height_width\n def make_node(self, kern, topgrad, shape):\n # Make sure both inputs are Variables with the same Type\n if not isinstance(kern, theano.Variable):\n kern = as_tensor_variable(kern)\n if not isinstance(topgrad, theano.Variable):\n topgrad = as_tensor_variable(topgrad)\n gtype = kern.type.clone(dtype=topgrad.dtype,\n broadcastable=topgrad.broadcastable)\n topgrad = gtype.filter_variable(topgrad)\n\n if kern.type.ndim != 4:\n raise TypeError('kern must be 4D tensor')\n if topgrad.type.ndim != 4:\n raise TypeError('topgrad must be 4D tensor')\n\n shape = as_tensor_variable(shape)\n broadcastable = [topgrad.type.broadcastable[0],\n kern.type.broadcastable[1],\n False, False]\n output = kern.type.clone(broadcastable=broadcastable)()\n return Apply(self, [kern, topgrad, shape], [output])\n\n def perform(self, node, inp, out_):\n kern, topgrad, shape = inp\n kern = numpy.asarray(kern)\n topgrad = numpy.asarray(topgrad)\n o, = out_\n\n mode = self.border_mode\n if not ((isinstance(mode, tuple) and min(mode) >= 0) or\n mode in ('valid', 'full', 'half')):\n raise ValueError(\n 'invalid border_mode {}, which must be either '\n '\"valid\", \"full\", \"half\", an integer or a pair of'\n ' integers'.format(mode))\n\n pad_h, pad_w = 0, 0\n if mode == \"full\":\n pad_h, pad_w = (kern.shape[2] - 1, kern.shape[3] - 1)\n elif mode == \"half\":\n pad_h, pad_w = (kern.shape[2] // 2, kern.shape[3] // 2)\n elif isinstance(mode, tuple):\n pad_h, pad_w = map(int, self.border_mode)\n if self.subsample[0] > 1 or self.subsample[1] > 1:\n new_shape = (topgrad.shape[0], topgrad.shape[1],\n shape[0] + 2 * pad_h - kern.shape[2] + 1,\n shape[1] + 2 * pad_w - kern.shape[3] + 1)\n new_topgrad = numpy.zeros((new_shape), dtype=topgrad.dtype)\n new_topgrad[:, :, ::self.subsample[0], ::self.subsample[1]] = topgrad\n topgrad = new_topgrad\n kern = kern.transpose(1, 0, 2, 3)\n if self.filter_flip:\n topgrad = topgrad[:, :, ::-1, ::-1]\n img = self.conv2d(topgrad, kern, mode=\"full\")\n if self.filter_flip:\n img = 
img[:, :, ::-1, ::-1]\n if pad_h > 0 or pad_w > 0:\n img = img[:, :, pad_h:img.shape[2] - pad_h, pad_w:img.shape[3] - pad_w]\n o[0] = node.outputs[0].type.filter(img)\n\n def grad(self, inp, grads):\n weights, top = inp[:2]\n bottom, = grads\n d_weights = AbstractConv2d_gradWeights(self.imshp, self.kshp,\n self.border_mode,\n self.subsample)(\n bottom, top,\n weights.shape[-2:])\n d_top = AbstractConv2d(self.imshp, self.kshp,\n self.border_mode, self.subsample)(\n bottom, weights)\n # Make sure that the broadcastable pattern of the inputs is used\n # for the gradients, even if the grad opts are not able to infer\n # that the dimensions are broadcastable.\n # Also make sure that the gradient lives on the same device than\n # the corresponding input.\n d_weights = patternbroadcast(d_weights, weights.broadcastable)\n d_weights = weights.type.filter_variable(d_weights)\n d_top = patternbroadcast(d_top, top.broadcastable)\n d_top = top.type.filter_variable(d_top)\n\n d_height_width = (theano.gradient.DisconnectedType()(),)\n return (d_weights, d_top) + d_height_width\n\n def connection_pattern(self, node):\n return [[1], [1], [0]] # no connection to height, width\n\n def infer_shape(self, node, input_shapes):\n # We use self.imshp (that was passed when creating the Op) if possible,\n # or fall back to the `shape` input of the node.\n # TODO: when there is no subsampling, try to infer the image shape\n # from the shapes of inputs.\n kshp = input_shapes[0]\n topshp = input_shapes[1]\n imshp = self.imshp[:] if self.imshp is not None else [None] * 4\n fallback_imshp = [topshp[0], kshp[1], node.inputs[2][0],\n node.inputs[2][1]]\n imshp = [fallback_imshp[i] if imshp[i] is None else imshp[i]\n for i in range(4)]\n return [imshp]\n",
"path": "theano/tensor/nnet/abstract_conv.py"
}
] | [
{
"content": "\"\"\"\nAbstract conv interface\n\"\"\"\nfrom __future__ import absolute_import, print_function, division\n\nimport logging\nfrom six import reraise, integer_types\nimport sys\n\nimport theano\n\nfrom theano.tensor import as_tensor_variable, patternbroadcast\nfrom theano.tensor import get_scalar_constant_value, NotScalarConstantError\nfrom theano.gof import Apply, Op\n\nfrom six.moves import xrange\n\nimport warnings\nimport numpy\nimport numpy as np\n\ntry:\n from scipy.signal.signaltools import _valfrommode, _bvalfromboundary\n from scipy.signal.sigtools import _convolve2d\n imported_scipy_signal = True\nexcept ImportError:\n imported_scipy_signal = False\n\n\n__docformat__ = \"restructuredtext en\"\n_logger = logging.getLogger(\"theano.tensor.nnet.abstract_conv\")\n\n\ndef get_conv_output_shape(image_shape, kernel_shape,\n border_mode, subsample):\n \"\"\"\n This function compute the output shape of convolution operation.\n\n Parameters\n ----------\n image_shape: tuple of int (symbolic or numeric) corresponding to the input\n image shape. Its four (or five) element must correspond respectively\n to: batch size, number of input channels, height and width (and\n possibly depth) of the image. None where undefined.\n kernel_shape: tuple of int (symbolic or numeric) corresponding to the\n kernel shape. Its four (or five) elements must correspond respectively\n to: number of output channels, number of input channels, height and\n width (and possibly depth) of the kernel. None where undefined.\n border_mode: string, int (symbolic or numeric) or tuple of int (symbolic\n or numeric). If it is a string, it must be 'valid', 'half' or 'full'.\n If it is a tuple, its two (or three) elements respectively correspond\n to the padding on height and width (and possibly depth) axis.\n subsample: tuple of int (symbolic or numeric). Its or three elements\n espectively correspond to the subsampling on height and width (and\n possibly depth) axis.\n\n Returns\n -------\n output_shape: tuple of int corresponding to the output image shape. Its\n four element must correspond respectively to: batch size, number of\n output channels, height and width of the image. None where undefined.\n\n \"\"\"\n bsize, imshp = image_shape[0], image_shape[2:]\n nkern, kshp = kernel_shape[0], kernel_shape[2:]\n if isinstance(border_mode, tuple):\n out_shp = tuple(get_conv_shape_1axis(\n imshp[i], kshp[i], border_mode[i], subsample[i])\n for i in range(len(subsample)))\n else:\n out_shp = tuple(get_conv_shape_1axis(\n imshp[i], kshp[i], border_mode, subsample[i])\n for i in range(len(subsample)))\n return (bsize, nkern) + out_shp\n\n\ndef get_conv_shape_1axis(image_shape, kernel_shape,\n border_mode, subsample):\n \"\"\"\n This function compute the output shape of convolution operation.\n\n Parameters\n ----------\n image_shape: int or None. Corresponds to the input image shape on a\n given axis. None if undefined.\n kernel_shape: int or None. Corresponds to the kernel shape on a given\n axis. None if undefined.\n border_mode: string or int. If it is a string, it must be\n 'valid', 'half' or 'full'. If it is an integer, it must correspond to\n the padding on the considered axis.\n subsample: int. It must correspond to the subsampling on the\n considered axis.\n\n Returns\n -------\n out_shp: int corresponding to the output image shape on the\n considered axis. 
None if undefined.\n\n \"\"\"\n if None in [image_shape, kernel_shape, border_mode, subsample]:\n return None\n if border_mode == \"half\":\n pad = kernel_shape // 2\n elif border_mode == \"full\":\n pad = kernel_shape - 1\n elif border_mode == \"valid\":\n pad = 0\n else:\n pad = border_mode\n if pad < 0:\n raise ValueError(\"border_mode must be >= 0\")\n out_shp = (image_shape + 2 * pad - kernel_shape) // subsample + 1\n\n return out_shp\n\n\ndef conv2d(input,\n filters,\n input_shape=None,\n filter_shape=None,\n border_mode='valid',\n subsample=(1, 1),\n filter_flip=True):\n \"\"\"This function will build the symbolic graph for convolving a mini-batch of a\n stack of 2D inputs with a set of 2D filters. The implementation is modelled\n after Convolutional Neural Networks (CNN).\n\n Refer to :func:`nnet.conv2d <theano.tensor.nnet.conv2d>` for a more detailed documentation.\n \"\"\"\n\n input = as_tensor_variable(input)\n filters = as_tensor_variable(filters)\n conv_op = AbstractConv2d(imshp=input_shape,\n kshp=filter_shape,\n border_mode=border_mode,\n subsample=subsample,\n filter_flip=filter_flip)\n return conv_op(input, filters)\n\n\ndef conv2d_grad_wrt_inputs(output_grad,\n filters,\n input_shape,\n filter_shape=None,\n border_mode='valid',\n subsample=(1, 1),\n filter_flip=True):\n \"\"\"Compute conv output gradient w.r.t its inputs\n\n This function builds the symbolic graph for getting the\n gradient of the output of a convolution (namely output_grad)\n w.r.t the input of the convolution, given a set of 2D filters\n used by the convolution, such that the output_grad is upsampled\n to the input_shape.\n\n Parameters\n ----------\n output_grad : symbolic 4D tensor\n mini-batch of feature map stacks, of shape (batch size, input\n channels, input rows, input columns). This is the tensor that\n will be upsampled or the output gradient of the convolution\n whose gradient will be taken with respect to the input of the\n convolution.\n filters : symbolic 4D tensor\n set of filters used in CNN layer of shape (output channels,\n input channels, filter rows, filter columns). See the\n optional parameter ``filter_shape``.\n input_shape : [None/int/Constant] * 2 + [Tensor/int/Constant] * 2\n The shape of the input (upsampled) parameter.\n A tuple/list of len 4, with the first two dimensions\n being None or int or Constant and the last two dimensions being\n Tensor or int or Constant.\n Not Optional, since given the output_grad shape\n and the subsample values, multiple input_shape may be\n plausible.\n filter_shape : None or [None/int/Constant] * 4\n The shape of the filters parameter. None or a tuple/list of len 4.\n Optional, possibly used to choose an optimal implementation.\n You can give ``None`` for any element of the list to specify that\n this element is not known at compile time.\n border_mode : str, int or tuple of two int\n Either of the following:\n\n ``'valid'``\n apply filter wherever it completely overlaps with the\n input. Generates output of shape: input shape - filter\n shape + 1\n\n ``'full'``\n apply filter wherever it partly overlaps with the input.\n Generates output of shape: input shape + filter shape - 1\n\n ``'half'``\n pad input with a symmetric border of ``filter rows // 2``\n rows and ``filter columns // 2`` columns, then perform a\n valid convolution. For filters with an odd number of rows\n and columns, this leads to the output shape being equal to\n the input shape. 
It is known as 'same' elsewhere.\n\n ``int``\n pad input with a symmetric border of zeros of the given\n width, then perform a valid convolution.\n\n ``(int1, int2)``\n pad input with a symmetric border of ``int1`` rows and\n ``int2`` columns, then perform a valid convolution.\n\n subsample : tuple of len 2\n The subsampling used in the forward pass. Also called strides\n elsewhere.\n filter_flip : bool\n If ``True``, will flip the filter rows and columns before\n sliding them over the input. This operation is normally\n referred to as a convolution, and this is the default. If\n ``False``, the filters are not flipped and the operation is\n referred to as a cross-correlation.\n\n Returns\n -------\n symbolic 4D tensor\n set of feature maps generated by convolutional layer. Tensor\n is of shape (batch size, output channels, output rows, output\n columns)\n\n Notes\n -----\n\n :note: If CuDNN is available, it will be used on the\n GPU. Otherwise, it is the *CorrMM* convolution that will be used\n \"caffe style convolution\".\n\n :note: This is only supported in Theano 0.8 or the development\n version until it is released.\n\n \"\"\"\n\n filters = as_tensor_variable(filters)\n output_grad = as_tensor_variable(output_grad)\n\n # checking the type of input_shape\n for dim in [0, 1]:\n assert isinstance(input_shape[dim], (theano.tensor.TensorConstant,\n integer_types, type(None)))\n for dim in [2, 3]:\n assert isinstance(input_shape[dim], (theano.tensor.TensorVariable,\n theano.tensor.TensorConstant,\n integer_types))\n\n # checking the type of filter_shape\n if filter_shape is not None:\n for dim in [0, 1, 2, 3]:\n assert isinstance(filter_shape[dim], (theano.tensor.TensorConstant,\n integer_types, type(None)))\n\n # setting the last two dimensions of input_shape to None, if\n # the type of these dimensions is TensorVariable.\n numerical_input_shape = list(input_shape)\n for dim in [2, 3]:\n if isinstance(input_shape[dim], theano.tensor.TensorVariable):\n numerical_input_shape[dim] = None\n\n grad_input_op = AbstractConv2d_gradInputs(imshp=numerical_input_shape,\n kshp=filter_shape,\n border_mode=border_mode,\n subsample=subsample,\n filter_flip=filter_flip)\n\n return grad_input_op(filters, output_grad, input_shape[-2:])\n\n\ndef conv2d_grad_wrt_weights(input,\n output_grad,\n filter_shape,\n input_shape=None,\n border_mode='valid',\n subsample=(1, 1),\n filter_flip=True):\n \"\"\"Compute conv output gradient w.r.t its weights\n\n This function will build the symbolic graph for getting the\n gradient of the output of a convolution (output_grad) w.r.t its wights.\n\n Parameters\n ----------\n input : symbolic 4D tensor\n mini-batch of feature map stacks, of shape (batch size, input\n channels, input rows, input columns). This is the input of\n the convolution in the forward pass.\n output_grad : symbolic 4D tensor\n mini-batch of feature map stacks, of shape (batch size, input\n channels, input rows, input columns). This is the gradient of\n the output of convolution.\n filter_shape : [None/int/Constant] * 2 + [Tensor/int/Constant] * 2\n The shape of the filter parameter. A tuple/list of len 4, with the\n first two dimensions being None or int or Constant and the last two\n dimensions being Tensor or int or Constant.\n Not Optional, since given the output_grad shape and\n the input_shape, multiple filter_shape may be plausible.\n input_shape : None or [None/int/Constant] * 4\n The shape of the input parameter. 
None or a tuple/list of len 4.\n Optional, possibly used to choose an optimal implementation.\n You can give ``None`` for any element of the list to specify\n that this element is not known at compile time.\n border_mode : str, int or tuple of two ints\n Either of the following:\n\n ``'valid'``\n apply filter wherever it completely overlaps with the\n input. Generates output of shape: input shape - filter\n shape + 1\n\n ``'full'``\n apply filter wherever it partly overlaps with the input.\n Generates output of shape: input shape + filter shape - 1\n\n ``'half'``\n pad input with a symmetric border of ``filter rows // 2``\n rows and ``filter columns // 2`` columns, then perform a\n valid convolution. For filters with an odd number of rows\n and columns, this leads to the output shape being equal to\n the input shape. It is known as 'same' elsewhere.\n\n ``int``\n pad input with a symmetric border of zeros of the given\n width, then perform a valid convolution.\n\n ``(int1, int2)``\n pad input with a symmetric border of ``int1`` rows and\n ``int2`` columns, then perform a valid convolution.\n\n subsample : tuple of len 2\n The subsampling used in the forward pass of the convolutional\n operation. Also called strides elsewhere.\n filter_flip : bool\n If ``True``, will flip the filter rows and columns before\n sliding them over the input. This operation is normally\n referred to as a convolution, and this is the default. If\n ``False``, the filters are not flipped and the operation is\n referred to as a cross-correlation.\n\n Returns\n -------\n symbolic 4D tensor\n set of feature maps generated by convolutional layer. Tensor\n is of shape (batch size, output channels, output rows, output\n columns)\n\n Notes\n -----\n\n :note: If CuDNN is available, it will be used on the\n GPU. 
Otherwise, it is the *CorrMM* convolution that will be used\n \"caffe style convolution\".\n\n :note: This is only supported in Theano 0.8 or the development\n version until it is released.\n\n \"\"\"\n\n input = as_tensor_variable(input)\n output_grad = as_tensor_variable(output_grad)\n\n # checking the type of filter_shape\n for dim in [0, 1]:\n assert isinstance(filter_shape[dim], (theano.tensor.TensorConstant,\n integer_types, type(None)))\n for dim in [2, 3]:\n assert isinstance(filter_shape[dim], (theano.tensor.TensorVariable,\n theano.tensor.TensorConstant,\n integer_types))\n\n # checking the type of input_shape\n if input_shape is not None:\n for dim in [0, 1, 2, 3]:\n assert isinstance(input_shape[dim], (theano.tensor.TensorConstant,\n integer_types, type(None)))\n\n # setting the last two dimensions of filter_shape to None, if\n # the type of these dimensions is TensorVariable.\n numerical_filter_shape = list(filter_shape)\n for dim in [2, 3]:\n if isinstance(filter_shape[dim], theano.tensor.TensorVariable):\n numerical_filter_shape[dim] = None\n\n gradWeight_op = AbstractConv2d_gradWeights(imshp=input_shape,\n kshp=numerical_filter_shape,\n border_mode=border_mode,\n subsample=subsample,\n filter_flip=filter_flip)\n\n return gradWeight_op(input, output_grad, filter_shape[:-2])\n\n\ndef bilinear_kernel_2D(ratio, normalize=True):\n \"\"\"Compute 2D kernel for bilinear upsampling\n\n This function builds the 2D kernel that can be used to upsample\n a tensor by the given ratio using bilinear interpolation.\n\n Parameters\n ----------\n ratio: int or Constant/Scalar Theano tensor of int* dtype\n the ratio by which an image will be upsampled by the returned filter\n in the 2D space.\n\n normalize: bool\n param normalize: indicates whether to normalize the kernel or not.\n Default is True.\n\n Returns\n -------\n symbolic 2D tensor\n the 2D kernels that can be applied to any given image to upsample it\n by the indicated ratio using bilinear interpolation in two dimensions.\n\n \"\"\"\n\n hkern = bilinear_kernel_1D(ratio=ratio, normalize=normalize).dimshuffle('x', 0)\n vkern = bilinear_kernel_1D(ratio=ratio, normalize=normalize).dimshuffle(0, 'x')\n kern = hkern * vkern\n return kern\n\n\ndef bilinear_kernel_1D(ratio, normalize=True):\n \"\"\"Compute 1D kernel for bilinear upsampling\n\n This function builds the 1D kernel that can be used to upsample\n a tensor by the given ratio using bilinear interpolation.\n\n Parameters\n ----------\n ratio: int or Constant/Scalar Theano tensor of int* dtype\n the ratio by which an image will be upsampled by the returned filter\n in the 2D space.\n\n normalize: bool\n param normalize: indicates whether to normalize the kernel or not.\n Default is True.\n\n Returns\n -------\n symbolic 1D tensor\n the 1D kernels that can be applied to any given image to upsample it\n by the indicated ratio using bilinear interpolation in one dimension.\n\n \"\"\"\n\n T = theano.tensor\n half_kern = T.arange(1, ratio + 1, dtype=theano.config.floatX)\n kern = T.concatenate([half_kern, half_kern[-2::-1]])\n\n if normalize:\n kern /= ratio\n return kern\n\n\ndef bilinear_upsampling(input,\n ratio,\n batch_size=None,\n num_input_channels=None,\n use_1D_kernel=True):\n \"\"\"Compute bilinear upsampling\n\n This function will build the symbolic graph for upsampling\n a tensor by the given ratio using bilinear interpolation.\n\n Parameters\n ----------\n input: symbolic 4D tensor\n mini-batch of feature map stacks, of shape (batch size,\n input channels, input rows, input 
columns) that will be upsampled.\n\n ratio: int or Constant or Scalar Tensor of int* dtype\n the ratio by which the input is upsampled in the 2D space (row and\n col size).\n\n batch_size: None, int or Constant variable\n The size of the first dimension of the input variable.\n Optional, possibly used to choose an optimal implementation.\n batch_size will be used only if num_input_channels is not None.\n\n num_input_channels: None, int or Constant variable\n The size of the second dimension of the input variable.\n Optional, possibly used to choose an optimal implementation.\n num_input_channels will be used only if batch_size is not None.\n\n use_1D_kernel: bool\n if set to true, row and column will be upsampled seperately by 1D\n kernels, otherwise they are upsampled together using a 2D kernel. The\n final result is the same, only the speed can differ, given factors such\n as upsampling ratio.\n\n Returns\n -------\n symbolic 4D tensor\n set of feature maps generated by bilinear upsampling. Tensor\n is of shape (batch size, num_input_channels, input row size * ratio,\n input column size * ratio)\n\n Notes\n -----\n\n :note: The kernel used for bilinear interpolation is fixed (not learned).\n\n :note: When the upsampling ratio is even, the last row and column is\n repeated one extra time compared to the first row and column which makes\n the upsampled tensor asymmetrical on both sides. This does not happen when\n the upsampling ratio is odd.\n\n \"\"\"\n\n T = theano.tensor\n try:\n up_bs = batch_size * num_input_channels\n except TypeError:\n up_bs = None\n row, col = input.shape[2:]\n up_input = input.reshape((-1, 1, row, col))\n\n # concatenating the first and last row and column\n # first and last row\n concat_mat = T.concatenate((up_input[:, :, :1, :], up_input,\n up_input[:, :, -1:, :]), axis=2)\n # first and last col\n concat_mat = T.concatenate((concat_mat[:, :, :, :1], concat_mat,\n concat_mat[:, :, :, -1:]), axis=3)\n concat_col = col + 2\n\n pad = 2 * ratio - (ratio - 1) // 2 - 1\n\n if use_1D_kernel:\n kern = bilinear_kernel_1D(ratio=ratio, normalize=True)\n # upsampling rows\n upsampled_row = conv2d_grad_wrt_inputs(output_grad=concat_mat,\n filters=kern[np.newaxis,\n np.newaxis, :,\n np.newaxis],\n input_shape=(up_bs, 1,\n row * ratio,\n concat_col),\n filter_shape=(1, 1, None, 1),\n border_mode=(pad, 0),\n subsample=(ratio, 1),\n filter_flip=True)\n # upsampling cols\n upsampled_mat = conv2d_grad_wrt_inputs(output_grad=upsampled_row,\n filters=kern[np.newaxis,\n np.newaxis,\n np.newaxis, :],\n input_shape=(up_bs, 1,\n row * ratio,\n col * ratio),\n filter_shape=(1, 1, 1, None),\n border_mode=(0, pad),\n subsample=(1, ratio),\n filter_flip=True)\n else:\n kern = bilinear_kernel_2D(ratio=ratio, normalize=True)\n upsampled_mat = conv2d_grad_wrt_inputs(output_grad=concat_mat,\n filters=kern[np.newaxis,\n np.newaxis, :, :],\n input_shape=(up_bs, 1,\n row * ratio,\n col * ratio),\n filter_shape=(1, 1, None, None),\n border_mode=(pad, pad),\n subsample=(ratio, ratio),\n filter_flip=True)\n\n return upsampled_mat.reshape((input.shape[0], input.shape[1],\n row * ratio, col * ratio))\n\n\nclass BaseAbstractConv2d(Op):\n \"\"\"Base class for AbstractConv\n\n Define an abstract convolution op that will be replaced with the\n appropriate implementation\n\n Parameters\n ----------\n imshp: None, tuple/list of len 4 of int or Constant variable\n The shape of the input parameter.\n Optional, possibly used to choose an optimal implementation.\n You can give ``None`` for any element of the 
list to specify that this\n element is not known at compile time.\n imshp is defined w.r.t the forward conv.\n\n kshp: None, tuple/list of len 4 of int or Constant variable\n The shape of the filters parameter.\n Optional, possibly used to choose an optimal implementation.\n You can give ``None`` for any element of the list to specify that this\n element is not known at compile time.\n kshp is defined w.r.t the forward conv.\n\n border_mode: str, int or tuple of two int\n Either of the following:\n\n ``'valid'``: apply filter wherever it completely overlaps with the\n input. Generates output of shape: input shape - filter shape + 1\n ``'full'``: apply filter wherever it partly overlaps with the input.\n Generates output of shape: input shape + filter shape - 1\n ``'half'``: pad input with a symmetric border of ``filter rows // 2``\n rows and ``filter columns // 2`` columns, then perform a valid\n convolution. For filters with an odd number of rows and columns, this\n leads to the output shape being equal to the input shape.\n ``int``: pad input with a symmetric border of zeros of the given\n width, then perform a valid convolution.\n ``(int1, int2)``: pad input with a symmetric border of ``int1`` rows\n and ``int2`` columns, then perform a valid convolution.\n\n subsample: tuple of len 2\n Factor by which to subsample the output.\n Also called strides elsewhere.\n\n filter_flip: bool\n If ``True``, will flip the filter rows and columns\n before sliding them over the input. This operation is normally referred\n to as a convolution, and this is the default. If ``False``, the filters\n are not flipped and the operation is referred to as a\n cross-correlation.\n\n \"\"\"\n check_broadcast = False\n __props__ = ('border_mode', 'subsample', 'filter_flip', 'imshp', 'kshp')\n\n def __init__(self,\n imshp=None, kshp=None,\n border_mode=\"valid\", subsample=(1, 1),\n filter_flip=True):\n\n if isinstance(border_mode, integer_types):\n border_mode = (border_mode, border_mode)\n if isinstance(border_mode, tuple):\n pad_h, pad_w = map(int, border_mode)\n border_mode = (pad_h, pad_w)\n if border_mode == (0, 0):\n border_mode = 'valid'\n if not ((isinstance(border_mode, tuple) and min(border_mode) >= 0) or\n border_mode in ('valid', 'full', 'half')):\n raise ValueError(\n 'invalid border_mode {}, which must be either '\n '\"valid\", \"full\", \"half\", an integer or a pair of'\n ' integers'.format(border_mode))\n\n self.imshp = tuple(imshp) if imshp else (None,) * 4\n for imshp_i in self.imshp:\n if imshp_i is not None:\n # Components of imshp should be constant or ints\n try:\n get_scalar_constant_value(imshp_i,\n only_process_constants=True)\n except NotScalarConstantError:\n reraise(ValueError,\n ValueError(\"imshp should be None or a tuple of \"\n \"constant int values\"),\n sys.exc_info()[2])\n self.kshp = tuple(kshp) if kshp else (None,) * 4\n for kshp_i in self.kshp:\n if kshp_i is not None:\n # Components of kshp should be constant or ints\n try:\n get_scalar_constant_value(kshp_i,\n only_process_constants=True)\n except NotScalarConstantError:\n reraise(ValueError,\n ValueError(\"kshp should be None or a tuple of \"\n \"constant int values\"),\n sys.exc_info()[2])\n self.border_mode = border_mode\n self.filter_flip = filter_flip\n\n if len(subsample) != 2:\n raise ValueError(\"subsample must have two elements\")\n self.subsample = tuple(subsample)\n\n def flops(self, inp, outp):\n \"\"\" Useful with the hack in profilemode to print the MFlops\"\"\"\n # if the output shape is correct, then this 
gives the correct\n # flops for any direction, sampling, padding, and border mode\n inputs, filters = inp\n outputs, = outp\n assert inputs[1] == filters[1]\n # nb mul and add by output pixel\n flops = filters[2] * filters[3] * 2\n # nb flops by output image\n flops *= outputs[2] * outputs[3]\n # nb patch multiplied\n flops *= inputs[1] * filters[0] * inputs[0]\n return flops\n\n def do_constant_folding(self, node):\n # Disable constant folding since there is no implementation.\n # This may change in the future.\n return False\n\n def conv2d(self, img, kern, mode=\"valid\"):\n \"\"\"\n Basic slow python implementatation for DebugMode\n \"\"\"\n\n if not imported_scipy_signal:\n raise NotImplementedError(\n \"AbstractConv perform requires the python package\"\n \" for scipy.signal to be installed.\")\n if not (mode in ('valid', 'full')):\n raise ValueError(\n 'invalid mode {}, which must be either '\n '\"valid\" or \"full\"'.format(mode))\n\n out_shape = get_conv_output_shape(img.shape, kern.shape, mode, [1, 1])\n out = numpy.zeros(out_shape, dtype=img.dtype)\n val = _valfrommode(mode)\n bval = _bvalfromboundary('fill')\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', numpy.ComplexWarning)\n for b in xrange(img.shape[0]):\n for n in xrange(kern.shape[0]):\n for im0 in xrange(img.shape[1]):\n # some cast generates a warning here\n out[b, n, ...] += _convolve2d(img[b, im0, ...],\n kern[n, im0, ...],\n 1, val, bval, 0)\n return out\n\n\nclass AbstractConv2d(BaseAbstractConv2d):\n \"\"\" Abstract Op for the forward convolution.\n Refer to :func:`BaseAbstractConv2d <theano.tensor.nnet.abstract_conv.BaseAbstractConv2d>`\n for a more detailed documentation.\n \"\"\"\n\n def __init__(self,\n imshp=None,\n kshp=None,\n border_mode=\"valid\",\n subsample=(1, 1),\n filter_flip=True):\n super(AbstractConv2d, self).__init__(imshp, kshp,\n border_mode, subsample,\n filter_flip)\n\n def make_node(self, img, kern):\n # Make sure both inputs are Variables with the same Type\n if not isinstance(img, theano.Variable):\n img = as_tensor_variable(img)\n if not isinstance(kern, theano.Variable):\n kern = as_tensor_variable(kern)\n ktype = img.type.clone(dtype=kern.dtype,\n broadcastable=kern.broadcastable)\n kern = ktype.filter_variable(kern)\n\n if img.type.ndim != 4:\n raise TypeError('img must be 4D tensor')\n if kern.type.ndim != 4:\n raise TypeError('kern must be 4D tensor')\n\n broadcastable = [img.broadcastable[0],\n kern.broadcastable[0],\n False, False]\n output = img.type.clone(broadcastable=broadcastable)()\n return Apply(self, [img, kern], [output])\n\n def perform(self, node, inp, out_):\n img, kern = inp\n img = numpy.asarray(img)\n kern = numpy.asarray(kern)\n o, = out_\n mode = self.border_mode\n\n if not ((isinstance(mode, tuple) and min(mode) >= 0) or\n mode in ('valid', 'full', 'half')):\n raise ValueError(\n 'invalid border_mode {}, which must be either '\n '\"valid\", \"full\", \"half\", an integer or a pair of'\n ' integers'.format(mode))\n\n if mode == \"full\":\n mode = (kern.shape[2] - 1, kern.shape[3] - 1)\n elif mode == \"half\":\n mode = (kern.shape[2] // 2, kern.shape[3] // 2)\n if isinstance(mode, tuple):\n pad_h, pad_w = map(int, mode)\n mode = \"valid\"\n new_img = numpy.zeros((img.shape[0], img.shape[1],\n img.shape[2] + 2 * pad_h,\n img.shape[3] + 2 * pad_w), dtype=img.dtype)\n new_img[:, :, pad_h:img.shape[2] + pad_h, pad_w:img.shape[3] + pad_w] = img\n img = new_img\n if not self.filter_flip:\n kern = kern[:, :, ::-1, ::-1]\n conv_out = self.conv2d(img, 
kern, mode=\"valid\")\n conv_out = conv_out[:, :, ::self.subsample[0], ::self.subsample[1]]\n\n o[0] = node.outputs[0].type.filter(conv_out)\n\n def R_op(self, inputs, eval_points):\n rval = None\n if eval_points[0] is not None:\n rval = self.make_node(eval_points[0], inputs[1]).outputs[0]\n if eval_points[1] is not None:\n if rval is None:\n rval = self.make_node(inputs[0], eval_points[1]).outputs[0]\n else:\n rval += self.make_node(inputs[0], eval_points[1]).outputs[0]\n return [rval]\n\n def grad(self, inp, grads):\n bottom, weights = inp\n top, = grads\n d_bottom = AbstractConv2d_gradInputs(self.imshp, self.kshp,\n self.border_mode,\n self.subsample,\n self.filter_flip)(\n weights, top, bottom.shape[-2:])\n d_weights = AbstractConv2d_gradWeights(self.imshp, self.kshp,\n self.border_mode,\n self.subsample,\n self.filter_flip)(\n\n bottom, top, weights.shape[-2:])\n\n # Make sure that the broadcastable pattern of the inputs is used\n # for the gradients, even if the grad opts are not able to infer\n # that the dimensions are broadcastable.\n # Also make sure that the gradient lives on the same device than\n # the corresponding input.\n d_bottom = patternbroadcast(d_bottom, bottom.broadcastable)\n d_bottom = bottom.type.filter_variable(d_bottom)\n d_weights = patternbroadcast(d_weights, weights.broadcastable)\n d_weights = weights.type.filter_variable(d_weights)\n return d_bottom, d_weights\n\n def infer_shape(self, node, input_shapes):\n imshp = input_shapes[0]\n kshp = input_shapes[1]\n\n # replace symbolic shapes with known constant shapes\n if self.imshp is not None:\n imshp = [imshp[i] if self.imshp[i] is None else self.imshp[i]\n for i in range(4)]\n if self.kshp is not None:\n kshp = [kshp[i] if self.kshp[i] is None else self.kshp[i]\n for i in range(4)]\n res = get_conv_output_shape(imshp, kshp, self.border_mode,\n self.subsample)\n return [res]\n\n\nclass AbstractConv2d_gradWeights(BaseAbstractConv2d):\n \"\"\"Gradient wrt. 
filters for `AbstractConv2d`.\n Refer to :func:`BaseAbstractConv2d <theano.tensor.nnet.abstract_conv.BaseAbstractConv2d>`\n for a more detailed documentation.\n\n :note: You will not want to use this directly, but rely on\n Theano's automatic differentiation or graph optimization to\n use it as needed.\n\n \"\"\"\n def __init__(self,\n imshp=None,\n kshp=None,\n border_mode=\"valid\",\n subsample=(1, 1),\n filter_flip=True):\n super(AbstractConv2d_gradWeights, self).__init__(imshp, kshp,\n border_mode,\n subsample,\n filter_flip)\n\n # Update shape/height_width\n def make_node(self, img, topgrad, shape):\n # Make sure both inputs are Variables with the same Type\n if not isinstance(img, theano.Variable):\n img = as_tensor_variable(img)\n if not isinstance(topgrad, theano.Variable):\n topgrad = as_tensor_variable(topgrad)\n gtype = img.type.clone(dtype=topgrad.dtype,\n broadcastable=topgrad.broadcastable)\n topgrad = gtype.filter_variable(topgrad)\n\n if img.type.ndim != 4:\n raise TypeError('img must be 4D tensor')\n if topgrad.type.ndim != 4:\n raise TypeError('topgrad must be 4D tensor')\n\n shape = as_tensor_variable(shape)\n broadcastable = [topgrad.broadcastable[1],\n img.broadcastable[1],\n False, False]\n output = img.type.clone(broadcastable=broadcastable)()\n return Apply(self, [img, topgrad, shape], [output])\n\n def perform(self, node, inp, out_):\n img, topgrad, shape = inp\n img = numpy.asarray(img)\n topgrad = numpy.asarray(topgrad)\n\n o, = out_\n\n mode = self.border_mode\n if not ((isinstance(mode, tuple) and min(mode) >= 0) or\n mode in ('valid', 'full', 'half')):\n raise ValueError(\n 'invalid border_mode {}, which must be either '\n '\"valid\", \"full\", \"half\", an integer or a pair of'\n ' integers'.format(mode))\n\n if mode == \"full\":\n mode = (shape[0] - 1, shape[1] - 1)\n elif mode == \"half\":\n mode = (shape[0] // 2, shape[1] // 2)\n if isinstance(mode, tuple):\n pad_h, pad_w = map(int, mode)\n mode = \"valid\"\n new_img = numpy.zeros((img.shape[0], img.shape[1],\n img.shape[2] + 2 * pad_h,\n img.shape[3] + 2 * pad_w), dtype=img.dtype)\n new_img[:, :, pad_h:img.shape[2] + pad_h, pad_w:img.shape[3] + pad_w] = img\n img = new_img\n\n if self.subsample[0] > 1 or self.subsample[1] > 1:\n new_shape = (topgrad.shape[0], topgrad.shape[1],\n img.shape[2] - shape[0] + 1,\n img.shape[3] - shape[1] + 1)\n new_topgrad = numpy.zeros((new_shape), dtype=topgrad.dtype)\n new_topgrad[:, :, ::self.subsample[0], ::self.subsample[1]] = topgrad\n topgrad = new_topgrad\n\n topgrad = topgrad.transpose(1, 0, 2, 3)[:, :, ::-1, ::-1]\n img = img.transpose(1, 0, 2, 3)\n kern = self.conv2d(img, topgrad, mode=\"valid\")\n if self.filter_flip:\n kern = kern.transpose(1, 0, 2, 3)[:, :, ::-1, ::-1]\n else:\n kern = kern.transpose(1, 0, 2, 3)\n o[0] = node.outputs[0].type.filter(kern)\n\n def grad(self, inp, grads):\n bottom, top = inp[:2]\n weights, = grads\n d_bottom = AbstractConv2d_gradInputs(self.imshp, self.kshp,\n self.border_mode,\n self.subsample,\n self.filter_flip)(\n weights,\n top,\n bottom.shape[-2:])\n d_top = AbstractConv2d(self.imshp,\n self.kshp,\n self.border_mode,\n self.subsample,\n self.filter_flip)(bottom, weights)\n # Make sure that the broadcastable pattern of the inputs is used\n # for the gradients, even if the grad opts are not able to infer\n # that the dimensions are broadcastable.\n # Also make sure that the gradient lives on the same device than\n # the corresponding input.\n d_bottom = patternbroadcast(d_bottom, bottom.broadcastable)\n d_bottom = 
bottom.type.filter_variable(d_bottom)\n d_top = patternbroadcast(d_top, top.broadcastable)\n d_top = top.type.filter_variable(d_top)\n\n d_height_width = (theano.gradient.DisconnectedType()(),)\n return (d_bottom, d_top) + d_height_width\n\n def connection_pattern(self, node):\n return [[1], [1], [0]] # no connection to height, width\n\n def infer_shape(self, node, input_shapes):\n # We use self.kshp (that was passed when creating the Op) if possible,\n # or fall back to the `shape` input of the node.\n # TODO: when there is no subsampling, try to infer the kernel shape\n # from the shapes of inputs.\n imshp = input_shapes[0]\n topshp = input_shapes[1]\n kshp = self.kshp[:] if self.kshp is not None else [None] * 4\n fallback_kshp = [topshp[1], imshp[1], node.inputs[2][0], node.inputs[2][1]]\n kshp = [fallback_kshp[i] if kshp[i] is None else kshp[i]\n for i in range(4)]\n return [kshp]\n\n\nclass AbstractConv2d_gradInputs(BaseAbstractConv2d):\n \"\"\"Gradient wrt. inputs for `AbstractConv2d`.\n Refer to :func:`BaseAbstractConv2d <theano.tensor.nnet.abstract_conv.BaseAbstractConv2d>`\n for a more detailed documentation.\n\n :note: You will not want to use this directly, but rely on\n Theano's automatic differentiation or graph optimization to\n use it as needed.\n\n \"\"\"\n\n def __init__(self,\n imshp=None,\n kshp=None,\n border_mode=\"valid\",\n subsample=(1, 1),\n filter_flip=True):\n super(AbstractConv2d_gradInputs, self).__init__(imshp, kshp,\n border_mode,\n subsample,\n filter_flip)\n\n # Update shape/height_width\n def make_node(self, kern, topgrad, shape):\n # Make sure both inputs are Variables with the same Type\n if not isinstance(kern, theano.Variable):\n kern = as_tensor_variable(kern)\n if not isinstance(topgrad, theano.Variable):\n topgrad = as_tensor_variable(topgrad)\n gtype = kern.type.clone(dtype=topgrad.dtype,\n broadcastable=topgrad.broadcastable)\n topgrad = gtype.filter_variable(topgrad)\n\n if kern.type.ndim != 4:\n raise TypeError('kern must be 4D tensor')\n if topgrad.type.ndim != 4:\n raise TypeError('topgrad must be 4D tensor')\n\n shape = as_tensor_variable(shape)\n broadcastable = [topgrad.type.broadcastable[0],\n kern.type.broadcastable[1],\n False, False]\n output = kern.type.clone(broadcastable=broadcastable)()\n return Apply(self, [kern, topgrad, shape], [output])\n\n def perform(self, node, inp, out_):\n kern, topgrad, shape = inp\n kern = numpy.asarray(kern)\n topgrad = numpy.asarray(topgrad)\n o, = out_\n\n mode = self.border_mode\n if not ((isinstance(mode, tuple) and min(mode) >= 0) or\n mode in ('valid', 'full', 'half')):\n raise ValueError(\n 'invalid border_mode {}, which must be either '\n '\"valid\", \"full\", \"half\", an integer or a pair of'\n ' integers'.format(mode))\n\n pad_h, pad_w = 0, 0\n if mode == \"full\":\n pad_h, pad_w = (kern.shape[2] - 1, kern.shape[3] - 1)\n elif mode == \"half\":\n pad_h, pad_w = (kern.shape[2] // 2, kern.shape[3] // 2)\n elif isinstance(mode, tuple):\n pad_h, pad_w = map(int, self.border_mode)\n if self.subsample[0] > 1 or self.subsample[1] > 1:\n new_shape = (topgrad.shape[0], topgrad.shape[1],\n shape[0] + 2 * pad_h - kern.shape[2] + 1,\n shape[1] + 2 * pad_w - kern.shape[3] + 1)\n new_topgrad = numpy.zeros((new_shape), dtype=topgrad.dtype)\n new_topgrad[:, :, ::self.subsample[0], ::self.subsample[1]] = topgrad\n topgrad = new_topgrad\n kern = kern.transpose(1, 0, 2, 3)\n if self.filter_flip:\n topgrad = topgrad[:, :, ::-1, ::-1]\n img = self.conv2d(topgrad, kern, mode=\"full\")\n if self.filter_flip:\n img = 
img[:, :, ::-1, ::-1]\n if pad_h > 0 or pad_w > 0:\n img = img[:, :, pad_h:img.shape[2] - pad_h, pad_w:img.shape[3] - pad_w]\n o[0] = node.outputs[0].type.filter(img)\n\n def grad(self, inp, grads):\n weights, top = inp[:2]\n bottom, = grads\n d_weights = AbstractConv2d_gradWeights(self.imshp, self.kshp,\n self.border_mode,\n self.subsample)(\n bottom, top,\n weights.shape[-2:])\n d_top = AbstractConv2d(self.imshp, self.kshp,\n self.border_mode, self.subsample)(\n bottom, weights)\n # Make sure that the broadcastable pattern of the inputs is used\n # for the gradients, even if the grad opts are not able to infer\n # that the dimensions are broadcastable.\n # Also make sure that the gradient lives on the same device than\n # the corresponding input.\n d_weights = patternbroadcast(d_weights, weights.broadcastable)\n d_weights = weights.type.filter_variable(d_weights)\n d_top = patternbroadcast(d_top, top.broadcastable)\n d_top = top.type.filter_variable(d_top)\n\n d_height_width = (theano.gradient.DisconnectedType()(),)\n return (d_weights, d_top) + d_height_width\n\n def connection_pattern(self, node):\n return [[1], [1], [0]] # no connection to height, width\n\n def infer_shape(self, node, input_shapes):\n # We use self.imshp (that was passed when creating the Op) if possible,\n # or fall back to the `shape` input of the node.\n # TODO: when there is no subsampling, try to infer the image shape\n # from the shapes of inputs.\n kshp = input_shapes[0]\n topshp = input_shapes[1]\n imshp = self.imshp[:] if self.imshp is not None else [None] * 4\n fallback_imshp = [topshp[0], kshp[1], node.inputs[2][0],\n node.inputs[2][1]]\n imshp = [fallback_imshp[i] if imshp[i] is None else imshp[i]\n for i in range(4)]\n return [imshp]\n",
"path": "theano/tensor/nnet/abstract_conv.py"
}
] | diff --git a/theano/tensor/nnet/abstract_conv.py b/theano/tensor/nnet/abstract_conv.py
index c3da886ffd5..065102451a8 100644
--- a/theano/tensor/nnet/abstract_conv.py
+++ b/theano/tensor/nnet/abstract_conv.py
@@ -567,7 +567,7 @@ def bilinear_upsampling(input,
subsample=(ratio, ratio),
filter_flip=True)
- return upsampled_mat.reshape((batch_size, num_input_channels,
+ return upsampled_mat.reshape((input.shape[0], input.shape[1],
row * ratio, col * ratio))
diff --git a/theano/tensor/nnet/tests/test_abstract_conv.py b/theano/tensor/nnet/tests/test_abstract_conv.py
index 7abd65a2383..887189b993e 100644
--- a/theano/tensor/nnet/tests/test_abstract_conv.py
+++ b/theano/tensor/nnet/tests/test_abstract_conv.py
@@ -687,6 +687,23 @@ def test_bilinear_upsampling_1D(self):
up_mat_2d = self.get_upsampled_twobytwo_mat(input_x, ratio)
utt.assert_allclose(f(), up_mat_2d, rtol=1e-06)
+ def test_bilinear_upsampling_reshaping(self):
+ # Test bilinear upsampling without giving shape information
+ # This method tests the bilinear_upsampling method
+ # without giving batch_size and num_input_channels
+ # upsampling for a ratio of two
+ input_x = np.array([[[[1, 2], [3, 4]]]], dtype=theano.config.floatX)
+
+ for ratio in [2, 3]:
+ for use_1D_kernel in [True, False]:
+ bilin_mat = bilinear_upsampling(input=input_x, ratio=ratio,
+ batch_size=None,
+ num_input_channels=None,
+ use_1D_kernel=use_1D_kernel)
+ f = theano.function([], bilin_mat, mode=self.compile_mode)
+ up_mat_2d = self.get_upsampled_twobytwo_mat(input_x, ratio)
+ utt.assert_allclose(f(), up_mat_2d, rtol=1e-06)
+
def test_compare_1D_and_2D_upsampling_values(self):
"""Compare 1D and 2D upsampling
| Bilinear_Upsampling requires batch_size and num_input_channels to be specified
Referring to this line:
https://github.com/Theano/Theano/blob/master/theano/tensor/nnet/abstract_conv.py#L570
this piece of code doesn't work with `batch_size` and/or `num_input_channels` set to `None`;
it requires the dimensions to be given as scalar values.
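A minimal reproduction sketch, mirroring the test added in the diff above (this assumes an installed Theano and uses the same 2x2 toy input; the printed shape is illustrative):
```python
import numpy as np
import theano
from theano.tensor.nnet.abstract_conv import bilinear_upsampling

x = np.array([[[[1, 2], [3, 4]]]], dtype=theano.config.floatX)

# Before the fix, leaving batch_size/num_input_channels as None failed,
# because the final reshape used those (None) scalars instead of the
# symbolic input.shape[0] and input.shape[1].
up = bilinear_upsampling(input=x, ratio=2,
                         batch_size=None, num_input_channels=None)
f = theano.function([], up)
print(f().shape)  # (1, 1, 4, 4) once the reshape uses input.shape
```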
|
openshift__openshift-ansible-3914 | [
{
"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# vim: expandtab:tabstop=4:shiftwidth=4\n'''\nCustom filters for use in openshift-master\n'''\nimport copy\nimport sys\n\n# pylint import-error disabled because pylint cannot find the package\n# when installed in a virtualenv\nfrom distutils.version import LooseVersion # pylint: disable=no-name-in-module,import-error\n\nfrom ansible import errors\nfrom ansible.parsing.yaml.dumper import AnsibleDumper\nfrom ansible.plugins.filter.core import to_bool as ansible_bool\n\n# ansible.compat.six goes away with Ansible 2.4\ntry:\n from ansible.compat.six import string_types, u\nexcept ImportError:\n from ansible.module_utils.six import string_types, u\n\nimport yaml\n\n\nclass IdentityProviderBase(object):\n \"\"\" IdentityProviderBase\n\n Attributes:\n name (str): Identity provider Name\n login (bool): Is this identity provider a login provider?\n challenge (bool): Is this identity provider a challenge provider?\n provider (dict): Provider specific config\n _idp (dict): internal copy of the IDP dict passed in\n _required (list): List of lists of strings for required attributes\n _optional (list): List of lists of strings for optional attributes\n _allow_additional (bool): Does this provider support attributes\n not in _required and _optional\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n # disabling this check since the number of instance attributes are\n # necessary for this class\n # pylint: disable=too-many-instance-attributes\n def __init__(self, api_version, idp):\n if api_version not in ['v1']:\n raise errors.AnsibleFilterError(\"|failed api version {0} unknown\".format(api_version))\n\n self._idp = copy.deepcopy(idp)\n\n if 'name' not in self._idp:\n raise errors.AnsibleFilterError(\"|failed identity provider missing a name\")\n\n if 'kind' not in self._idp:\n raise errors.AnsibleFilterError(\"|failed identity provider missing a kind\")\n\n self.name = self._idp.pop('name')\n self.login = ansible_bool(self._idp.pop('login', False))\n self.challenge = ansible_bool(self._idp.pop('challenge', False))\n self.provider = dict(apiVersion=api_version, kind=self._idp.pop('kind'))\n\n mm_keys = ('mappingMethod', 'mapping_method')\n mapping_method = None\n for key in mm_keys:\n if key in self._idp:\n mapping_method = self._idp.pop(key)\n if mapping_method is None:\n mapping_method = self.get_default('mappingMethod')\n self.mapping_method = mapping_method\n\n valid_mapping_methods = ['add', 'claim', 'generate', 'lookup']\n if self.mapping_method not in valid_mapping_methods:\n raise errors.AnsibleFilterError(\"|failed unknown mapping method \"\n \"for provider {0}\".format(self.__class__.__name__))\n self._required = []\n self._optional = []\n self._allow_additional = True\n\n @staticmethod\n def validate_idp_list(idp_list, openshift_version, deployment_type):\n ''' validates a list of idps '''\n login_providers = [x.name for x in idp_list if x.login]\n\n multiple_logins_unsupported = False\n if len(login_providers) > 1:\n if deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']:\n if LooseVersion(openshift_version) < LooseVersion('3.2'):\n multiple_logins_unsupported = True\n if deployment_type in ['origin']:\n if LooseVersion(openshift_version) < LooseVersion('1.2'):\n multiple_logins_unsupported = True\n if multiple_logins_unsupported:\n raise errors.AnsibleFilterError(\"|failed multiple providers are \"\n \"not allowed for 
login. login \"\n \"providers: {0}\".format(', '.join(login_providers)))\n\n names = [x.name for x in idp_list]\n if len(set(names)) != len(names):\n raise errors.AnsibleFilterError(\"|failed more than one provider configured with the same name\")\n\n for idp in idp_list:\n idp.validate()\n\n def validate(self):\n ''' validate an instance of this idp class '''\n pass\n\n @staticmethod\n def get_default(key):\n ''' get a default value for a given key '''\n if key == 'mappingMethod':\n return 'claim'\n else:\n return None\n\n def set_provider_item(self, items, required=False):\n ''' set a provider item based on the list of item names provided. '''\n for item in items:\n provider_key = items[0]\n if item in self._idp:\n self.provider[provider_key] = self._idp.pop(item)\n break\n else:\n default = self.get_default(provider_key)\n if default is not None:\n self.provider[provider_key] = default\n elif required:\n raise errors.AnsibleFilterError(\"|failed provider {0} missing \"\n \"required key {1}\".format(self.__class__.__name__, provider_key))\n\n def set_provider_items(self):\n ''' set the provider items for this idp '''\n for items in self._required:\n self.set_provider_item(items, True)\n for items in self._optional:\n self.set_provider_item(items)\n if self._allow_additional:\n for key in self._idp.keys():\n self.set_provider_item([key])\n else:\n if len(self._idp) > 0:\n raise errors.AnsibleFilterError(\"|failed provider {0} \"\n \"contains unknown keys \"\n \"{1}\".format(self.__class__.__name__, ', '.join(self._idp.keys())))\n\n def to_dict(self):\n ''' translate this idp to a dictionary '''\n return dict(name=self.name, challenge=self.challenge,\n login=self.login, mappingMethod=self.mapping_method,\n provider=self.provider)\n\n\nclass LDAPPasswordIdentityProvider(IdentityProviderBase):\n \"\"\" LDAPPasswordIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(LDAPPasswordIdentityProvider, self).__init__(api_version, idp)\n self._allow_additional = False\n self._required += [['attributes'], ['url'], ['insecure']]\n self._optional += [['ca'],\n ['bindDN', 'bind_dn'],\n ['bindPassword', 'bind_password']]\n\n self._idp['insecure'] = ansible_bool(self._idp.pop('insecure', False))\n\n if 'attributes' in self._idp and 'preferred_username' in self._idp['attributes']:\n pref_user = self._idp['attributes'].pop('preferred_username')\n self._idp['attributes']['preferredUsername'] = pref_user\n\n def validate(self):\n ''' validate this idp instance '''\n if not isinstance(self.provider['attributes'], dict):\n raise errors.AnsibleFilterError(\"|failed attributes for provider \"\n \"{0} must be a dictionary\".format(self.__class__.__name__))\n\n attrs = ['id', 'email', 'name', 'preferredUsername']\n for attr in attrs:\n if attr in self.provider['attributes'] and not isinstance(self.provider['attributes'][attr], list):\n raise errors.AnsibleFilterError(\"|failed {0} attribute for \"\n \"provider {1} must be a list\".format(attr, self.__class__.__name__))\n\n unknown_attrs = set(self.provider['attributes'].keys()) - set(attrs)\n if len(unknown_attrs) > 0:\n raise errors.AnsibleFilterError(\"|failed provider {0} has unknown \"\n \"attributes: {1}\".format(self.__class__.__name__, ', '.join(unknown_attrs)))\n\n\nclass KeystonePasswordIdentityProvider(IdentityProviderBase):\n \"\"\" KeystoneIdentityProvider\n\n Attributes:\n\n Args:\n 
api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(KeystonePasswordIdentityProvider, self).__init__(api_version, idp)\n self._allow_additional = False\n self._required += [['url'], ['domainName', 'domain_name']]\n self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]\n\n\nclass RequestHeaderIdentityProvider(IdentityProviderBase):\n \"\"\" RequestHeaderIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(RequestHeaderIdentityProvider, self).__init__(api_version, idp)\n self._allow_additional = False\n self._required += [['headers']]\n self._optional += [['challengeURL', 'challenge_url'],\n ['loginURL', 'login_url'],\n ['clientCA', 'client_ca'],\n ['clientCommonNames', 'client_common_names'],\n ['emailHeaders', 'email_headers'],\n ['nameHeaders', 'name_headers'],\n ['preferredUsernameHeaders', 'preferred_username_headers']]\n\n def validate(self):\n ''' validate this idp instance '''\n if not isinstance(self.provider['headers'], list):\n raise errors.AnsibleFilterError(\"|failed headers for provider {0} \"\n \"must be a list\".format(self.__class__.__name__))\n\n\nclass AllowAllPasswordIdentityProvider(IdentityProviderBase):\n \"\"\" AllowAllPasswordIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(AllowAllPasswordIdentityProvider, self).__init__(api_version, idp)\n self._allow_additional = False\n\n\nclass DenyAllPasswordIdentityProvider(IdentityProviderBase):\n \"\"\" DenyAllPasswordIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(DenyAllPasswordIdentityProvider, self).__init__(api_version, idp)\n self._allow_additional = False\n\n\nclass HTPasswdPasswordIdentityProvider(IdentityProviderBase):\n \"\"\" HTPasswdPasswordIdentity\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(HTPasswdPasswordIdentityProvider, self).__init__(api_version, idp)\n self._allow_additional = False\n self._required += [['file', 'filename', 'fileName', 'file_name']]\n\n @staticmethod\n def get_default(key):\n if key == 'file':\n return '/etc/origin/htpasswd'\n else:\n return IdentityProviderBase.get_default(key)\n\n\nclass BasicAuthPasswordIdentityProvider(IdentityProviderBase):\n \"\"\" BasicAuthPasswordIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(BasicAuthPasswordIdentityProvider, self).__init__(api_version, idp)\n self._allow_additional = False\n self._required += [['url']]\n self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]\n\n\nclass IdentityProviderOauthBase(IdentityProviderBase):\n \"\"\" IdentityProviderOauthBase\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, 
api_version, idp):\n super(IdentityProviderOauthBase, self).__init__(api_version, idp)\n self._allow_additional = False\n self._required += [['clientID', 'client_id'], ['clientSecret', 'client_secret']]\n\n def validate(self):\n ''' validate this idp instance '''\n if self.challenge:\n raise errors.AnsibleFilterError(\"|failed provider {0} does not \"\n \"allow challenge authentication\".format(self.__class__.__name__))\n\n\nclass OpenIDIdentityProvider(IdentityProviderOauthBase):\n \"\"\" OpenIDIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n IdentityProviderOauthBase.__init__(self, api_version, idp)\n self._required += [['claims'], ['urls']]\n self._optional += [['ca'],\n ['extraScopes'],\n ['extraAuthorizeParameters']]\n if 'claims' in self._idp and 'preferred_username' in self._idp['claims']:\n pref_user = self._idp['claims'].pop('preferred_username')\n self._idp['claims']['preferredUsername'] = pref_user\n if 'urls' in self._idp and 'user_info' in self._idp['urls']:\n user_info = self._idp['urls'].pop('user_info')\n self._idp['urls']['userInfo'] = user_info\n if 'extra_scopes' in self._idp:\n self._idp['extraScopes'] = self._idp.pop('extra_scopes')\n if 'extra_authorize_parameters' in self._idp:\n self._idp['extraAuthorizeParameters'] = self._idp.pop('extra_authorize_parameters')\n\n if 'extraAuthorizeParameters' in self._idp:\n if 'include_granted_scopes' in self._idp['extraAuthorizeParameters']:\n val = ansible_bool(self._idp['extraAuthorizeParameters'].pop('include_granted_scopes'))\n self._idp['extraAuthorizeParameters']['include_granted_scopes'] = val\n\n def validate(self):\n ''' validate this idp instance '''\n IdentityProviderOauthBase.validate(self)\n if not isinstance(self.provider['claims'], dict):\n raise errors.AnsibleFilterError(\"|failed claims for provider {0} \"\n \"must be a dictionary\".format(self.__class__.__name__))\n\n for var, var_type in (('extraScopes', list), ('extraAuthorizeParameters', dict)):\n if var in self.provider and not isinstance(self.provider[var], var_type):\n raise errors.AnsibleFilterError(\"|failed {1} for provider \"\n \"{0} must be a {2}\".format(self.__class__.__name__,\n var,\n var_type.__class__.__name__))\n\n required_claims = ['id']\n optional_claims = ['email', 'name', 'preferredUsername']\n all_claims = required_claims + optional_claims\n\n for claim in required_claims:\n if claim in required_claims and claim not in self.provider['claims']:\n raise errors.AnsibleFilterError(\"|failed {0} claim missing \"\n \"for provider {1}\".format(claim, self.__class__.__name__))\n\n for claim in all_claims:\n if claim in self.provider['claims'] and not isinstance(self.provider['claims'][claim], list):\n raise errors.AnsibleFilterError(\"|failed {0} claims for \"\n \"provider {1} must be a list\".format(claim, self.__class__.__name__))\n\n unknown_claims = set(self.provider['claims'].keys()) - set(all_claims)\n if len(unknown_claims) > 0:\n raise errors.AnsibleFilterError(\"|failed provider {0} has unknown \"\n \"claims: {1}\".format(self.__class__.__name__, ', '.join(unknown_claims)))\n\n if not isinstance(self.provider['urls'], dict):\n raise errors.AnsibleFilterError(\"|failed urls for provider {0} \"\n \"must be a dictionary\".format(self.__class__.__name__))\n\n required_urls = ['authorize', 'token']\n optional_urls = ['userInfo']\n all_urls = required_urls + optional_urls\n\n for url in 
required_urls:\n if url not in self.provider['urls']:\n raise errors.AnsibleFilterError(\"|failed {0} url missing for \"\n \"provider {1}\".format(url, self.__class__.__name__))\n\n unknown_urls = set(self.provider['urls'].keys()) - set(all_urls)\n if len(unknown_urls) > 0:\n raise errors.AnsibleFilterError(\"|failed provider {0} has unknown \"\n \"urls: {1}\".format(self.__class__.__name__, ', '.join(unknown_urls)))\n\n\nclass GoogleIdentityProvider(IdentityProviderOauthBase):\n \"\"\" GoogleIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n IdentityProviderOauthBase.__init__(self, api_version, idp)\n self._optional += [['hostedDomain', 'hosted_domain']]\n\n\nclass GitHubIdentityProvider(IdentityProviderOauthBase):\n \"\"\" GitHubIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n IdentityProviderOauthBase.__init__(self, api_version, idp)\n self._optional += [['organizations']]\n\n\nclass FilterModule(object):\n ''' Custom ansible filters for use by the openshift_master role'''\n\n @staticmethod\n def translate_idps(idps, api_version, openshift_version, deployment_type):\n ''' Translates a list of dictionaries into a valid identityProviders config '''\n idp_list = []\n\n if not isinstance(idps, list):\n raise errors.AnsibleFilterError(\"|failed expects to filter on a list of identity providers\")\n for idp in idps:\n if not isinstance(idp, dict):\n raise errors.AnsibleFilterError(\"|failed identity providers must be a list of dictionaries\")\n\n cur_module = sys.modules[__name__]\n idp_class = getattr(cur_module, idp['kind'], None)\n idp_inst = idp_class(api_version, idp) if idp_class is not None else IdentityProviderBase(api_version, idp)\n idp_inst.set_provider_items()\n idp_list.append(idp_inst)\n\n IdentityProviderBase.validate_idp_list(idp_list, openshift_version, deployment_type)\n return u(yaml.dump([idp.to_dict() for idp in idp_list],\n allow_unicode=True,\n default_flow_style=False,\n Dumper=AnsibleDumper))\n\n @staticmethod\n def validate_pcs_cluster(data, masters=None):\n ''' Validates output from \"pcs status\", ensuring that each master\n provided is online.\n Ex: data = ('...',\n 'PCSD Status:',\n 'master1.example.com: Online',\n 'master2.example.com: Online',\n 'master3.example.com: Online',\n '...')\n masters = ['master1.example.com',\n 'master2.example.com',\n 'master3.example.com']\n returns True\n '''\n if not issubclass(type(data), string_types):\n raise errors.AnsibleFilterError(\"|failed expects data is a string or unicode\")\n if not issubclass(type(masters), list):\n raise errors.AnsibleFilterError(\"|failed expects masters is a list\")\n valid = True\n for master in masters:\n if \"{0}: Online\".format(master) not in data:\n valid = False\n return valid\n\n @staticmethod\n def certificates_to_synchronize(hostvars, include_keys=True, include_ca=True):\n ''' Return certificates to synchronize based on facts. 
'''\n if not issubclass(type(hostvars), dict):\n raise errors.AnsibleFilterError(\"|failed expects hostvars is a dict\")\n certs = ['admin.crt',\n 'admin.key',\n 'admin.kubeconfig',\n 'master.kubelet-client.crt',\n 'master.kubelet-client.key']\n if bool(include_ca):\n certs += ['ca.crt', 'ca.key', 'ca-bundle.crt']\n if bool(include_keys):\n certs += ['serviceaccounts.private.key',\n 'serviceaccounts.public.key']\n if bool(hostvars['openshift']['common']['version_gte_3_1_or_1_1']):\n certs += ['master.proxy-client.crt',\n 'master.proxy-client.key']\n if not bool(hostvars['openshift']['common']['version_gte_3_2_or_1_2']):\n certs += ['openshift-master.crt',\n 'openshift-master.key',\n 'openshift-master.kubeconfig']\n if bool(hostvars['openshift']['common']['version_gte_3_3_or_1_3']):\n certs += ['service-signer.crt',\n 'service-signer.key']\n if not bool(hostvars['openshift']['common']['version_gte_3_5_or_1_5']):\n certs += ['openshift-registry.crt',\n 'openshift-registry.key',\n 'openshift-registry.kubeconfig',\n 'openshift-router.crt',\n 'openshift-router.key',\n 'openshift-router.kubeconfig']\n return certs\n\n @staticmethod\n def oo_htpasswd_users_from_file(file_contents):\n ''' return a dictionary of htpasswd users from htpasswd file contents '''\n htpasswd_entries = {}\n if not isinstance(file_contents, string_types):\n raise errors.AnsibleFilterError(\"failed, expects to filter on a string\")\n for line in file_contents.splitlines():\n user = None\n passwd = None\n if len(line) == 0:\n continue\n if ':' in line:\n user, passwd = line.split(':', 1)\n\n if user is None or len(user) == 0 or passwd is None or len(passwd) == 0:\n error_msg = \"failed, expects each line to be a colon separated string representing the user and passwd\"\n raise errors.AnsibleFilterError(error_msg)\n htpasswd_entries[user] = passwd\n return htpasswd_entries\n\n def filters(self):\n ''' returns a mapping of filters to methods '''\n return {\"translate_idps\": self.translate_idps,\n \"validate_pcs_cluster\": self.validate_pcs_cluster,\n \"certificates_to_synchronize\": self.certificates_to_synchronize,\n \"oo_htpasswd_users_from_file\": self.oo_htpasswd_users_from_file}\n",
"path": "roles/openshift_master_facts/filter_plugins/openshift_master.py"
}
] | [
{
"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# vim: expandtab:tabstop=4:shiftwidth=4\n'''\nCustom filters for use in openshift-master\n'''\nimport copy\nimport sys\n\n# pylint import-error disabled because pylint cannot find the package\n# when installed in a virtualenv\nfrom distutils.version import LooseVersion # pylint: disable=no-name-in-module,import-error\n\nfrom ansible import errors\nfrom ansible.parsing.yaml.dumper import AnsibleDumper\nfrom ansible.plugins.filter.core import to_bool as ansible_bool\n\n# ansible.compat.six goes away with Ansible 2.4\ntry:\n from ansible.compat.six import string_types, u\nexcept ImportError:\n from ansible.module_utils.six import string_types, u\n\nimport yaml\n\n\nclass IdentityProviderBase(object):\n \"\"\" IdentityProviderBase\n\n Attributes:\n name (str): Identity provider Name\n login (bool): Is this identity provider a login provider?\n challenge (bool): Is this identity provider a challenge provider?\n provider (dict): Provider specific config\n _idp (dict): internal copy of the IDP dict passed in\n _required (list): List of lists of strings for required attributes\n _optional (list): List of lists of strings for optional attributes\n _allow_additional (bool): Does this provider support attributes\n not in _required and _optional\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n # disabling this check since the number of instance attributes are\n # necessary for this class\n # pylint: disable=too-many-instance-attributes\n def __init__(self, api_version, idp):\n if api_version not in ['v1']:\n raise errors.AnsibleFilterError(\"|failed api version {0} unknown\".format(api_version))\n\n self._idp = copy.deepcopy(idp)\n\n if 'name' not in self._idp:\n raise errors.AnsibleFilterError(\"|failed identity provider missing a name\")\n\n if 'kind' not in self._idp:\n raise errors.AnsibleFilterError(\"|failed identity provider missing a kind\")\n\n self.name = self._idp.pop('name')\n self.login = ansible_bool(self._idp.pop('login', False))\n self.challenge = ansible_bool(self._idp.pop('challenge', False))\n self.provider = dict(apiVersion=api_version, kind=self._idp.pop('kind'))\n\n mm_keys = ('mappingMethod', 'mapping_method')\n mapping_method = None\n for key in mm_keys:\n if key in self._idp:\n mapping_method = self._idp.pop(key)\n if mapping_method is None:\n mapping_method = self.get_default('mappingMethod')\n self.mapping_method = mapping_method\n\n valid_mapping_methods = ['add', 'claim', 'generate', 'lookup']\n if self.mapping_method not in valid_mapping_methods:\n raise errors.AnsibleFilterError(\"|failed unknown mapping method \"\n \"for provider {0}\".format(self.__class__.__name__))\n self._required = []\n self._optional = []\n self._allow_additional = True\n\n @staticmethod\n def validate_idp_list(idp_list, openshift_version, deployment_type):\n ''' validates a list of idps '''\n login_providers = [x.name for x in idp_list if x.login]\n\n multiple_logins_unsupported = False\n if len(login_providers) > 1:\n if deployment_type in ['enterprise', 'online', 'atomic-enterprise', 'openshift-enterprise']:\n if LooseVersion(openshift_version) < LooseVersion('3.2'):\n multiple_logins_unsupported = True\n if deployment_type in ['origin']:\n if LooseVersion(openshift_version) < LooseVersion('1.2'):\n multiple_logins_unsupported = True\n if multiple_logins_unsupported:\n raise errors.AnsibleFilterError(\"|failed multiple providers are \"\n \"not allowed for 
login. login \"\n \"providers: {0}\".format(', '.join(login_providers)))\n\n names = [x.name for x in idp_list]\n if len(set(names)) != len(names):\n raise errors.AnsibleFilterError(\"|failed more than one provider configured with the same name\")\n\n for idp in idp_list:\n idp.validate()\n\n def validate(self):\n ''' validate an instance of this idp class '''\n pass\n\n @staticmethod\n def get_default(key):\n ''' get a default value for a given key '''\n if key == 'mappingMethod':\n return 'claim'\n else:\n return None\n\n def set_provider_item(self, items, required=False):\n ''' set a provider item based on the list of item names provided. '''\n for item in items:\n provider_key = items[0]\n if item in self._idp:\n self.provider[provider_key] = self._idp.pop(item)\n break\n else:\n default = self.get_default(provider_key)\n if default is not None:\n self.provider[provider_key] = default\n elif required:\n raise errors.AnsibleFilterError(\"|failed provider {0} missing \"\n \"required key {1}\".format(self.__class__.__name__, provider_key))\n\n def set_provider_items(self):\n ''' set the provider items for this idp '''\n for items in self._required:\n self.set_provider_item(items, True)\n for items in self._optional:\n self.set_provider_item(items)\n if self._allow_additional:\n for key in self._idp.keys():\n self.set_provider_item([key])\n else:\n if len(self._idp) > 0:\n raise errors.AnsibleFilterError(\"|failed provider {0} \"\n \"contains unknown keys \"\n \"{1}\".format(self.__class__.__name__, ', '.join(self._idp.keys())))\n\n def to_dict(self):\n ''' translate this idp to a dictionary '''\n return dict(name=self.name, challenge=self.challenge,\n login=self.login, mappingMethod=self.mapping_method,\n provider=self.provider)\n\n\nclass LDAPPasswordIdentityProvider(IdentityProviderBase):\n \"\"\" LDAPPasswordIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(LDAPPasswordIdentityProvider, self).__init__(api_version, idp)\n self._allow_additional = False\n self._required += [['attributes'], ['url'], ['insecure']]\n self._optional += [['ca'],\n ['bindDN', 'bind_dn'],\n ['bindPassword', 'bind_password']]\n\n self._idp['insecure'] = ansible_bool(self._idp.pop('insecure', False))\n\n if 'attributes' in self._idp and 'preferred_username' in self._idp['attributes']:\n pref_user = self._idp['attributes'].pop('preferred_username')\n self._idp['attributes']['preferredUsername'] = pref_user\n\n def validate(self):\n ''' validate this idp instance '''\n if not isinstance(self.provider['attributes'], dict):\n raise errors.AnsibleFilterError(\"|failed attributes for provider \"\n \"{0} must be a dictionary\".format(self.__class__.__name__))\n\n attrs = ['id', 'email', 'name', 'preferredUsername']\n for attr in attrs:\n if attr in self.provider['attributes'] and not isinstance(self.provider['attributes'][attr], list):\n raise errors.AnsibleFilterError(\"|failed {0} attribute for \"\n \"provider {1} must be a list\".format(attr, self.__class__.__name__))\n\n unknown_attrs = set(self.provider['attributes'].keys()) - set(attrs)\n if len(unknown_attrs) > 0:\n raise errors.AnsibleFilterError(\"|failed provider {0} has unknown \"\n \"attributes: {1}\".format(self.__class__.__name__, ', '.join(unknown_attrs)))\n\n\nclass KeystonePasswordIdentityProvider(IdentityProviderBase):\n \"\"\" KeystoneIdentityProvider\n\n Attributes:\n\n Args:\n 
api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(KeystonePasswordIdentityProvider, self).__init__(api_version, idp)\n self._allow_additional = False\n self._required += [['url'], ['domainName', 'domain_name']]\n self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]\n\n\nclass RequestHeaderIdentityProvider(IdentityProviderBase):\n \"\"\" RequestHeaderIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(RequestHeaderIdentityProvider, self).__init__(api_version, idp)\n self._allow_additional = False\n self._required += [['headers']]\n self._optional += [['challengeURL', 'challenge_url'],\n ['loginURL', 'login_url'],\n ['clientCA', 'client_ca'],\n ['clientCommonNames', 'client_common_names'],\n ['emailHeaders', 'email_headers'],\n ['nameHeaders', 'name_headers'],\n ['preferredUsernameHeaders', 'preferred_username_headers']]\n\n def validate(self):\n ''' validate this idp instance '''\n if not isinstance(self.provider['headers'], list):\n raise errors.AnsibleFilterError(\"|failed headers for provider {0} \"\n \"must be a list\".format(self.__class__.__name__))\n\n\nclass AllowAllPasswordIdentityProvider(IdentityProviderBase):\n \"\"\" AllowAllPasswordIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(AllowAllPasswordIdentityProvider, self).__init__(api_version, idp)\n self._allow_additional = False\n\n\nclass DenyAllPasswordIdentityProvider(IdentityProviderBase):\n \"\"\" DenyAllPasswordIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(DenyAllPasswordIdentityProvider, self).__init__(api_version, idp)\n self._allow_additional = False\n\n\nclass HTPasswdPasswordIdentityProvider(IdentityProviderBase):\n \"\"\" HTPasswdPasswordIdentity\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(HTPasswdPasswordIdentityProvider, self).__init__(api_version, idp)\n self._allow_additional = False\n self._required += [['file', 'filename', 'fileName', 'file_name']]\n\n @staticmethod\n def get_default(key):\n if key == 'file':\n return '/etc/origin/htpasswd'\n else:\n return IdentityProviderBase.get_default(key)\n\n\nclass BasicAuthPasswordIdentityProvider(IdentityProviderBase):\n \"\"\" BasicAuthPasswordIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n super(BasicAuthPasswordIdentityProvider, self).__init__(api_version, idp)\n self._allow_additional = False\n self._required += [['url']]\n self._optional += [['ca'], ['certFile', 'cert_file'], ['keyFile', 'key_file']]\n\n\nclass IdentityProviderOauthBase(IdentityProviderBase):\n \"\"\" IdentityProviderOauthBase\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, 
api_version, idp):\n super(IdentityProviderOauthBase, self).__init__(api_version, idp)\n self._allow_additional = False\n self._required += [['clientID', 'client_id'], ['clientSecret', 'client_secret']]\n\n def validate(self):\n ''' validate this idp instance '''\n if self.challenge:\n raise errors.AnsibleFilterError(\"|failed provider {0} does not \"\n \"allow challenge authentication\".format(self.__class__.__name__))\n\n\nclass OpenIDIdentityProvider(IdentityProviderOauthBase):\n \"\"\" OpenIDIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n IdentityProviderOauthBase.__init__(self, api_version, idp)\n self._required += [['claims'], ['urls']]\n self._optional += [['ca'],\n ['extraScopes'],\n ['extraAuthorizeParameters']]\n if 'claims' in self._idp and 'preferred_username' in self._idp['claims']:\n pref_user = self._idp['claims'].pop('preferred_username')\n self._idp['claims']['preferredUsername'] = pref_user\n if 'urls' in self._idp and 'user_info' in self._idp['urls']:\n user_info = self._idp['urls'].pop('user_info')\n self._idp['urls']['userInfo'] = user_info\n if 'extra_scopes' in self._idp:\n self._idp['extraScopes'] = self._idp.pop('extra_scopes')\n if 'extra_authorize_parameters' in self._idp:\n self._idp['extraAuthorizeParameters'] = self._idp.pop('extra_authorize_parameters')\n\n if 'extraAuthorizeParameters' in self._idp:\n if 'include_granted_scopes' in self._idp['extraAuthorizeParameters']:\n val = ansible_bool(self._idp['extraAuthorizeParameters'].pop('include_granted_scopes'))\n self._idp['extraAuthorizeParameters']['include_granted_scopes'] = val\n\n def validate(self):\n ''' validate this idp instance '''\n IdentityProviderOauthBase.validate(self)\n if not isinstance(self.provider['claims'], dict):\n raise errors.AnsibleFilterError(\"|failed claims for provider {0} \"\n \"must be a dictionary\".format(self.__class__.__name__))\n\n for var, var_type in (('extraScopes', list), ('extraAuthorizeParameters', dict)):\n if var in self.provider and not isinstance(self.provider[var], var_type):\n raise errors.AnsibleFilterError(\"|failed {1} for provider \"\n \"{0} must be a {2}\".format(self.__class__.__name__,\n var,\n var_type.__class__.__name__))\n\n required_claims = ['id']\n optional_claims = ['email', 'name', 'preferredUsername']\n all_claims = required_claims + optional_claims\n\n for claim in required_claims:\n if claim in required_claims and claim not in self.provider['claims']:\n raise errors.AnsibleFilterError(\"|failed {0} claim missing \"\n \"for provider {1}\".format(claim, self.__class__.__name__))\n\n for claim in all_claims:\n if claim in self.provider['claims'] and not isinstance(self.provider['claims'][claim], list):\n raise errors.AnsibleFilterError(\"|failed {0} claims for \"\n \"provider {1} must be a list\".format(claim, self.__class__.__name__))\n\n unknown_claims = set(self.provider['claims'].keys()) - set(all_claims)\n if len(unknown_claims) > 0:\n raise errors.AnsibleFilterError(\"|failed provider {0} has unknown \"\n \"claims: {1}\".format(self.__class__.__name__, ', '.join(unknown_claims)))\n\n if not isinstance(self.provider['urls'], dict):\n raise errors.AnsibleFilterError(\"|failed urls for provider {0} \"\n \"must be a dictionary\".format(self.__class__.__name__))\n\n required_urls = ['authorize', 'token']\n optional_urls = ['userInfo']\n all_urls = required_urls + optional_urls\n\n for url in 
required_urls:\n if url not in self.provider['urls']:\n raise errors.AnsibleFilterError(\"|failed {0} url missing for \"\n \"provider {1}\".format(url, self.__class__.__name__))\n\n unknown_urls = set(self.provider['urls'].keys()) - set(all_urls)\n if len(unknown_urls) > 0:\n raise errors.AnsibleFilterError(\"|failed provider {0} has unknown \"\n \"urls: {1}\".format(self.__class__.__name__, ', '.join(unknown_urls)))\n\n\nclass GoogleIdentityProvider(IdentityProviderOauthBase):\n \"\"\" GoogleIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n IdentityProviderOauthBase.__init__(self, api_version, idp)\n self._optional += [['hostedDomain', 'hosted_domain']]\n\n\nclass GitHubIdentityProvider(IdentityProviderOauthBase):\n \"\"\" GitHubIdentityProvider\n\n Attributes:\n\n Args:\n api_version(str): OpenShift config version\n idp (dict): idp config dict\n\n Raises:\n AnsibleFilterError:\n \"\"\"\n def __init__(self, api_version, idp):\n IdentityProviderOauthBase.__init__(self, api_version, idp)\n self._optional += [['organizations']]\n\n\nclass FilterModule(object):\n ''' Custom ansible filters for use by the openshift_master role'''\n\n @staticmethod\n def translate_idps(idps, api_version, openshift_version, deployment_type):\n ''' Translates a list of dictionaries into a valid identityProviders config '''\n idp_list = []\n\n if not isinstance(idps, list):\n raise errors.AnsibleFilterError(\"|failed expects to filter on a list of identity providers\")\n for idp in idps:\n if not isinstance(idp, dict):\n raise errors.AnsibleFilterError(\"|failed identity providers must be a list of dictionaries\")\n\n cur_module = sys.modules[__name__]\n idp_class = getattr(cur_module, idp['kind'], None)\n idp_inst = idp_class(api_version, idp) if idp_class is not None else IdentityProviderBase(api_version, idp)\n idp_inst.set_provider_items()\n idp_list.append(idp_inst)\n\n IdentityProviderBase.validate_idp_list(idp_list, openshift_version, deployment_type)\n return u(yaml.dump([idp.to_dict() for idp in idp_list],\n allow_unicode=True,\n default_flow_style=False,\n width=float(\"inf\"),\n Dumper=AnsibleDumper))\n\n @staticmethod\n def validate_pcs_cluster(data, masters=None):\n ''' Validates output from \"pcs status\", ensuring that each master\n provided is online.\n Ex: data = ('...',\n 'PCSD Status:',\n 'master1.example.com: Online',\n 'master2.example.com: Online',\n 'master3.example.com: Online',\n '...')\n masters = ['master1.example.com',\n 'master2.example.com',\n 'master3.example.com']\n returns True\n '''\n if not issubclass(type(data), string_types):\n raise errors.AnsibleFilterError(\"|failed expects data is a string or unicode\")\n if not issubclass(type(masters), list):\n raise errors.AnsibleFilterError(\"|failed expects masters is a list\")\n valid = True\n for master in masters:\n if \"{0}: Online\".format(master) not in data:\n valid = False\n return valid\n\n @staticmethod\n def certificates_to_synchronize(hostvars, include_keys=True, include_ca=True):\n ''' Return certificates to synchronize based on facts. 
'''\n if not issubclass(type(hostvars), dict):\n raise errors.AnsibleFilterError(\"|failed expects hostvars is a dict\")\n certs = ['admin.crt',\n 'admin.key',\n 'admin.kubeconfig',\n 'master.kubelet-client.crt',\n 'master.kubelet-client.key']\n if bool(include_ca):\n certs += ['ca.crt', 'ca.key', 'ca-bundle.crt']\n if bool(include_keys):\n certs += ['serviceaccounts.private.key',\n 'serviceaccounts.public.key']\n if bool(hostvars['openshift']['common']['version_gte_3_1_or_1_1']):\n certs += ['master.proxy-client.crt',\n 'master.proxy-client.key']\n if not bool(hostvars['openshift']['common']['version_gte_3_2_or_1_2']):\n certs += ['openshift-master.crt',\n 'openshift-master.key',\n 'openshift-master.kubeconfig']\n if bool(hostvars['openshift']['common']['version_gte_3_3_or_1_3']):\n certs += ['service-signer.crt',\n 'service-signer.key']\n if not bool(hostvars['openshift']['common']['version_gte_3_5_or_1_5']):\n certs += ['openshift-registry.crt',\n 'openshift-registry.key',\n 'openshift-registry.kubeconfig',\n 'openshift-router.crt',\n 'openshift-router.key',\n 'openshift-router.kubeconfig']\n return certs\n\n @staticmethod\n def oo_htpasswd_users_from_file(file_contents):\n ''' return a dictionary of htpasswd users from htpasswd file contents '''\n htpasswd_entries = {}\n if not isinstance(file_contents, string_types):\n raise errors.AnsibleFilterError(\"failed, expects to filter on a string\")\n for line in file_contents.splitlines():\n user = None\n passwd = None\n if len(line) == 0:\n continue\n if ':' in line:\n user, passwd = line.split(':', 1)\n\n if user is None or len(user) == 0 or passwd is None or len(passwd) == 0:\n error_msg = \"failed, expects each line to be a colon separated string representing the user and passwd\"\n raise errors.AnsibleFilterError(error_msg)\n htpasswd_entries[user] = passwd\n return htpasswd_entries\n\n def filters(self):\n ''' returns a mapping of filters to methods '''\n return {\"translate_idps\": self.translate_idps,\n \"validate_pcs_cluster\": self.validate_pcs_cluster,\n \"certificates_to_synchronize\": self.certificates_to_synchronize,\n \"oo_htpasswd_users_from_file\": self.oo_htpasswd_users_from_file}\n",
"path": "roles/openshift_master_facts/filter_plugins/openshift_master.py"
}
] | diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py
index e570392ffd8..386f544ea06 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py
@@ -496,6 +496,7 @@ def translate_idps(idps, api_version, openshift_version, deployment_type):
return u(yaml.dump([idp.to_dict() for idp in idp_list],
allow_unicode=True,
default_flow_style=False,
+ width=float("inf"),
Dumper=AnsibleDumper))
@staticmethod
| Long DN string with spaces can cause incorrect YAML to be generated in master-config
#### Description
I have a configuration file where my settings for an LDAP identity provider include a long string containing spaces. When this is converted to YAML, line feeds are inserted in some of the longer DN/LDAP filter strings, which results in the master API service failing to start due to invalid config.
Modifying the following `yaml.dump()` to include `width=1000` for example results in a working master-config.yaml.
https://github.com/openshift/openshift-ansible/blob/7496b1235f72bd4241e4917f50df722174bf90fa/roles/openshift_master_facts/filter_plugins/openshift_master.py#L496-L499
##### Version
```
ansible 2.2.1.0
atomic-openshift-utils-3.4.67-1.git.0.14a0b4d.el7.noarch
openshift-ansible-3.4.67-1.git.0.14a0b4d.el7.noarch
```
##### Steps To Reproduce
Create a long LDAP identity provider in openshift_master_identity_providers that has spaces in the DN, e.g. `'url':'ldap://url.to.some.ldap.server/DC=blah,DC=foo,DC=bar?uid??(memberof=CN=UserGroupForPaaS,OU=Groups,OU=Unit With Spaces For Some Reason,OU=Also With - In Case,DC=blah,DC=foo,DC=bar)'`
##### Expected Results
master-config.yaml with a DN that is readable by the OpenShift master service.
##### Observed Results
Master service fails to start.
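A minimal sketch of the wrapping behaviour and of why the patch above helps (assumes PyYAML is installed; the URL is the one from the reproduction step):
```python
import yaml

url = ('ldap://url.to.some.ldap.server/DC=blah,DC=foo,DC=bar?uid??'
       '(memberof=CN=UserGroupForPaaS,OU=Groups,OU=Unit With Spaces For Some Reason,'
       'OU=Also With - In Case,DC=blah,DC=foo,DC=bar)')

# With the default width (about 80 columns) PyYAML folds the long plain
# scalar at its spaces, spreading the value over several lines; this is
# the output the report describes as unreadable by the OpenShift master.
print(yaml.dump({'url': url}, default_flow_style=False))

# Passing width=float("inf"), as the patch does, keeps it on a single line.
print(yaml.dump({'url': url}, default_flow_style=False, width=float("inf")))
```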
|
beeware__toga-1078 | [
{
"content": "from toga import GROUP_BREAK, SECTION_BREAK\n\nfrom .libs import Size, WinForms\n\n\nclass WinFormsViewport:\n def __init__(self, native, frame):\n self.native = native\n self.frame = frame\n self.baseline_dpi = 96\n\n @property\n def width(self):\n # Treat `native=None` as a 0x0 viewport\n if self.native is None:\n return 0\n return self.native.ClientSize.Width\n\n @property\n def height(self):\n if self.native is None:\n return 0\n # Subtract any vertical shift of the frame. This is to allow\n # for toolbars, or any other viewport-level decoration.\n return self.native.ClientSize.Height - self.frame.vertical_shift\n\n @property\n def dpi(self):\n if self.native is None:\n return self.baseline_dpi\n return self.native.CreateGraphics().DpiX\n\n\nclass Window:\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n self.create()\n\n def create(self):\n self.native = WinForms.Form(self)\n self.native.ClientSize = Size(*self.interface._size)\n self.native.interface = self.interface\n self.native.Resize += self.winforms_resize\n self.toolbar_native = None\n self.toolbar_items = None\n\n def create_toolbar(self):\n self.toolbar_native = WinForms.ToolStrip()\n for cmd in self.interface.toolbar:\n if cmd == GROUP_BREAK:\n item = WinForms.ToolStripSeparator()\n elif cmd == SECTION_BREAK:\n item = WinForms.ToolStripSeparator()\n else:\n if cmd.icon is not None:\n native_icon = cmd.icon._impl.native\n item = WinForms.ToolStripMenuItem(cmd.label, native_icon.ToBitmap())\n else:\n item = WinForms.ToolStripMenuItem(cmd.label)\n item.Click += cmd._impl.as_handler()\n cmd._impl.native.append(item)\n self.toolbar_native.Items.Add(item)\n\n def set_position(self, position):\n pass\n\n def set_size(self, size):\n self.native.ClientSize = Size(*self.interface._size)\n\n def set_app(self, app):\n if app is None:\n return\n icon_impl = app.interface.icon._impl\n if icon_impl is None:\n return\n self.native.Icon = icon_impl.native\n\n @property\n def vertical_shift(self):\n # vertical shift is the toolbar height or 0\n result = 0\n try:\n result += self.native.interface._impl.toolbar_native.Height\n except AttributeError:\n pass\n try:\n result += self.native.interface._impl.native.MainMenuStrip.Height\n except AttributeError:\n pass\n return result\n\n def set_content(self, widget):\n if self.toolbar_native:\n self.native.Controls.Add(self.toolbar_native)\n # Create the lookup table of menu items,\n # then force the creation of the menus.\n self.native.Controls.Add(widget.native)\n\n # Set the widget's viewport to be based on the window's content.\n widget.viewport = WinFormsViewport(native=self.native, frame=self)\n widget.frame = self\n\n # Add all children to the content widget.\n for child in widget.interface.children:\n child._impl.container = widget\n\n def set_title(self, title):\n self.native.Text = title\n\n def show(self):\n # The first render of the content will establish the\n # minimum possible content size; use that to enforce\n # a minimum window size.\n TITLEBAR_HEIGHT = WinForms.SystemInformation.CaptionHeight\n # Now that the content is visible, we can do our initial hinting,\n # and use that as the basis for setting the minimum window size.\n self.interface.content._impl.rehint()\n self.interface.content.style.layout(\n self.interface.content,\n WinFormsViewport(native=None, frame=None),\n )\n self.native.MinimumSize = Size(\n int(self.interface.content.layout.width),\n int(self.interface.content.layout.height) + TITLEBAR_HEIGHT\n )\n 
self.interface.content.refresh()\n\n self.native.Show()\n\n def winforms_FormClosing(self, event, handler):\n if self.interface.app.on_exit:\n self.interface.app.on_exit(self.interface.app)\n\n def set_full_screen(self, is_full_screen):\n self.interface.factory.not_implemented('Window.set_full_screen()')\n\n def on_close(self):\n pass\n\n def close(self):\n self.native.Close()\n\n def winforms_resize(self, sender, args):\n if self.interface.content:\n # Re-layout the content\n self.interface.content.refresh()\n\n def info_dialog(self, title, message):\n return WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OK)\n\n def question_dialog(self, title, message):\n result = WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.YesNo)\n return result\n\n def confirm_dialog(self, title, message):\n result = WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OKCancel)\n # this returns 1 (DialogResult.OK enum) for OK and 2 for Cancel\n return True if result == WinForms.DialogResult.OK else False\n\n def error_dialog(self, title, message):\n return WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OK,\n WinForms.MessageBoxIcon.Error)\n\n def stack_trace_dialog(self, title, message, content, retry=False):\n pass\n\n def save_file_dialog(self, title, suggested_filename, file_types):\n dialog = WinForms.SaveFileDialog()\n dialog.Title = title\n if suggested_filename is not None:\n dialog.FileName = suggested_filename\n if file_types is not None:\n dialog.Filter = self.build_filter(file_types)\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n return dialog.FileName\n else:\n raise ValueError(\"No filename provided in the save file dialog\")\n\n def open_file_dialog(self, title, initial_directory, file_types, multiselect):\n dialog = WinForms.OpenFileDialog()\n dialog.Title = title\n if initial_directory is not None:\n dialog.InitialDirectory = initial_directory\n if file_types is not None:\n dialog.Filter = self.build_filter(file_types)\n if multiselect:\n dialog.Multiselect = True\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n return dialog.FileName\n else:\n raise ValueError(\"No filename provided in the open file dialog\")\n\n def select_folder_dialog(self, title, initial_directory, multiselect):\n dialog = WinForms.FolderBrowserDialog()\n dialog.Title = title\n if initial_directory is not None:\n dialog.InitialDirectory = initial_directory\n\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n return [dialog.SelectedPath]\n else:\n raise ValueError(\"No folder provided in the select folder dialog\")\n\n def build_filter(self, file_types):\n file_string = \"{0} files (*.{0})|*.{0}\"\n return '|'.join([file_string.format(ext) for ext in file_types]) + \\\n \"|All files (*.*)|*.*\"\n",
"path": "src/winforms/toga_winforms/window.py"
}
] | [
{
"content": "from toga import GROUP_BREAK, SECTION_BREAK\n\nfrom .libs import Size, WinForms\n\n\nclass WinFormsViewport:\n def __init__(self, native, frame):\n self.native = native\n self.frame = frame\n self.baseline_dpi = 96\n\n @property\n def width(self):\n # Treat `native=None` as a 0x0 viewport\n if self.native is None:\n return 0\n return self.native.ClientSize.Width\n\n @property\n def height(self):\n if self.native is None:\n return 0\n # Subtract any vertical shift of the frame. This is to allow\n # for toolbars, or any other viewport-level decoration.\n return self.native.ClientSize.Height - self.frame.vertical_shift\n\n @property\n def dpi(self):\n if self.native is None:\n return self.baseline_dpi\n return self.native.CreateGraphics().DpiX\n\n\nclass Window:\n def __init__(self, interface):\n self.interface = interface\n self.interface._impl = self\n self.create()\n\n def create(self):\n self.native = WinForms.Form(self)\n self.native.ClientSize = Size(*self.interface._size)\n self.native.interface = self.interface\n self.native.Resize += self.winforms_resize\n self.toolbar_native = None\n self.toolbar_items = None\n\n def create_toolbar(self):\n self.toolbar_native = WinForms.ToolStrip()\n for cmd in self.interface.toolbar:\n if cmd == GROUP_BREAK:\n item = WinForms.ToolStripSeparator()\n elif cmd == SECTION_BREAK:\n item = WinForms.ToolStripSeparator()\n else:\n if cmd.icon is not None:\n native_icon = cmd.icon._impl.native\n item = WinForms.ToolStripMenuItem(cmd.label, native_icon.ToBitmap())\n else:\n item = WinForms.ToolStripMenuItem(cmd.label)\n item.Click += cmd._impl.as_handler()\n cmd._impl.native.append(item)\n self.toolbar_native.Items.Add(item)\n\n def set_position(self, position):\n pass\n\n def set_size(self, size):\n self.native.ClientSize = Size(*self.interface._size)\n\n def set_app(self, app):\n if app is None:\n return\n icon_impl = app.interface.icon._impl\n if icon_impl is None:\n return\n self.native.Icon = icon_impl.native\n\n @property\n def vertical_shift(self):\n # vertical shift is the toolbar height or 0\n result = 0\n try:\n result += self.native.interface._impl.toolbar_native.Height\n except AttributeError:\n pass\n try:\n result += self.native.interface._impl.native.MainMenuStrip.Height\n except AttributeError:\n pass\n return result\n\n def set_content(self, widget):\n if self.toolbar_native:\n self.native.Controls.Add(self.toolbar_native)\n # Create the lookup table of menu items,\n # then force the creation of the menus.\n self.native.Controls.Add(widget.native)\n\n # Set the widget's viewport to be based on the window's content.\n widget.viewport = WinFormsViewport(native=self.native, frame=self)\n widget.frame = self\n\n # Add all children to the content widget.\n for child in widget.interface.children:\n child._impl.container = widget\n\n def set_title(self, title):\n self.native.Text = title\n\n def show(self):\n # The first render of the content will establish the\n # minimum possible content size; use that to enforce\n # a minimum window size.\n TITLEBAR_HEIGHT = WinForms.SystemInformation.CaptionHeight\n # Now that the content is visible, we can do our initial hinting,\n # and use that as the basis for setting the minimum window size.\n self.interface.content._impl.rehint()\n self.interface.content.style.layout(\n self.interface.content,\n WinFormsViewport(native=None, frame=None),\n )\n self.native.MinimumSize = Size(\n int(self.interface.content.layout.width),\n int(self.interface.content.layout.height) + TITLEBAR_HEIGHT\n )\n 
self.interface.content.refresh()\n\n self.native.Show()\n\n def winforms_FormClosing(self, event, handler):\n if self.interface.app.on_exit:\n self.interface.app.on_exit(self.interface.app)\n\n def set_full_screen(self, is_full_screen):\n self.interface.factory.not_implemented('Window.set_full_screen()')\n\n def on_close(self):\n pass\n\n def close(self):\n self.native.Close()\n\n def winforms_resize(self, sender, args):\n if self.interface.content:\n # Re-layout the content\n self.interface.content.refresh()\n\n def info_dialog(self, title, message):\n return WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OK)\n\n def question_dialog(self, title, message):\n result = WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.YesNo)\n return result\n\n def confirm_dialog(self, title, message):\n result = WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OKCancel)\n # this returns 1 (DialogResult.OK enum) for OK and 2 for Cancel\n return True if result == WinForms.DialogResult.OK else False\n\n def error_dialog(self, title, message):\n return WinForms.MessageBox.Show(message, title, WinForms.MessageBoxButtons.OK,\n WinForms.MessageBoxIcon.Error)\n\n def stack_trace_dialog(self, title, message, content, retry=False):\n pass\n\n def save_file_dialog(self, title, suggested_filename, file_types):\n dialog = WinForms.SaveFileDialog()\n dialog.Title = title\n if suggested_filename is not None:\n dialog.FileName = suggested_filename\n if file_types is not None:\n dialog.Filter = self.build_filter(file_types)\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n return dialog.FileName\n else:\n raise ValueError(\"No filename provided in the save file dialog\")\n\n def open_file_dialog(self, title, initial_directory, file_types, multiselect):\n dialog = WinForms.OpenFileDialog()\n dialog.Title = title\n if initial_directory is not None:\n dialog.InitialDirectory = initial_directory\n if file_types is not None:\n dialog.Filter = self.build_filter(file_types)\n if multiselect:\n dialog.Multiselect = True\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n return dialog.FileNames if multiselect else dialog.FileName\n else:\n raise ValueError(\"No filename provided in the open file dialog\")\n\n def select_folder_dialog(self, title, initial_directory, multiselect):\n dialog = WinForms.FolderBrowserDialog()\n dialog.Title = title\n if initial_directory is not None:\n dialog.InitialDirectory = initial_directory\n\n if dialog.ShowDialog() == WinForms.DialogResult.OK:\n return [dialog.SelectedPath]\n else:\n raise ValueError(\"No folder provided in the select folder dialog\")\n\n def build_filter(self, file_types):\n file_string = \"{0} files (*.{0})|*.{0}\"\n return '|'.join([file_string.format(ext) for ext in file_types]) + \\\n \"|All files (*.*)|*.*\"\n",
"path": "src/winforms/toga_winforms/window.py"
}
] | diff --git a/src/winforms/toga_winforms/window.py b/src/winforms/toga_winforms/window.py
index 4ad54ae907..ff5305b782 100644
--- a/src/winforms/toga_winforms/window.py
+++ b/src/winforms/toga_winforms/window.py
@@ -187,7 +187,7 @@ def open_file_dialog(self, title, initial_directory, file_types, multiselect):
if multiselect:
dialog.Multiselect = True
if dialog.ShowDialog() == WinForms.DialogResult.OK:
- return dialog.FileName
+ return dialog.FileNames if multiselect else dialog.FileName
else:
raise ValueError("No filename provided in the open file dialog")
| toga-winforms\windows.py openFileDialog needs an "s"
**Describe the bug**
When using `multiselect=True` in winforms, the returned value is a single file, because
if dialog.ShowDialog() == WinForms.DialogResult.OK:
return dialog.FileName
should be
if dialog.ShowDialog() == WinForms.DialogResult.OK:
return dialog.FileName**s**
**To Reproduce**
fname = self.main_window.open_file_dialog(
title="Open the file",
multiselect=True
)
**Expected behavior**
returns a list of files (fname), which is what happens on a Mac. On Windows it is the name of the first file as a single string. With the suggested fix it returns the selection, but it still needs to be coerced into a list.
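A small sketch of that coercion; `as_file_list` is a hypothetical caller-side helper (not part of Toga's API) that normalizes whatever the backend returns:
```python
def as_file_list(result):
    # Hypothetical helper: make the dialog result uniform across platforms,
    # whether the backend returned one path (str) or several (e.g. the
    # WinForms FileNames collection).
    if result is None:
        return []
    if isinstance(result, str):
        return [result]
    return list(result)

print(as_file_list("C:/data/a.txt"))      # ['C:/data/a.txt']
print(as_file_list(["a.txt", "b.txt"]))   # ['a.txt', 'b.txt']
```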
**Environment:**
- Operating System: Mac OS 10.15.6 & Windows 10
- Python version: 3.8
- Software versions:
- Briefcase: 0.3.3
- Toga: 0.3.0 dev23
|
facebookresearch__hydra-2677 | [
{
"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport os\n\nfrom omegaconf import DictConfig\n\nimport hydra\n\n\[email protected](version_base=None)\ndef my_app(_cfg: DictConfig) -> None:\n print(f\"Working directory : {os.getcwd()}\")\n print(f\"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}\")\n\n\nif __name__ == \"__main__\":\n my_app()\n",
"path": "examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py"
}
] | [
{
"content": "# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved\nimport os\n\nfrom omegaconf import DictConfig\n\nimport hydra\n\n\[email protected](version_base=None)\ndef my_app(_cfg: DictConfig) -> None:\n print(f\"Working directory : {os.getcwd()}\")\n print(\n f\"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}\"\n )\n\n\nif __name__ == \"__main__\":\n my_app()\n",
"path": "examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py"
}
] | diff --git a/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py b/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py
index 922c84a523..8106e42256 100644
--- a/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py
+++ b/examples/tutorials/basic/running_your_hydra_app/3_working_directory/my_app.py
@@ -9,7 +9,9 @@
@hydra.main(version_base=None)
def my_app(_cfg: DictConfig) -> None:
print(f"Working directory : {os.getcwd()}")
- print(f"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}")
+ print(
+ f"Output directory : {hydra.core.hydra_config.HydraConfig.get().runtime.output_dir}"
+ )
if __name__ == "__main__":
diff --git a/tests/test_examples/test_tutorials_basic.py b/tests/test_examples/test_tutorials_basic.py
index 941e3f9fd1..05e2bf047a 100644
--- a/tests/test_examples/test_tutorials_basic.py
+++ b/tests/test_examples/test_tutorials_basic.py
@@ -53,7 +53,8 @@ def test_tutorial_working_directory(tmpdir: Path) -> None:
"hydra.job.chdir=True",
]
result, _err = run_python_script(cmd)
- assert result == "Working directory : {}".format(tmpdir)
+ working_directory, output_directory = result.split("\n")
+ assert working_directory == f"Working directory : {tmpdir}"
def test_tutorial_working_directory_original_cwd(tmpdir: Path) -> None:
| [Bug] Fix failing tests
Several tests are broken on main
|
Pycord-Development__pycord-888 | [
{
"content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015-2021 Rapptz\nCopyright (c) 2021-present Pycord Development\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\n\nfrom __future__ import annotations\n\n\nimport asyncio\nimport collections\nimport collections.abc\nimport inspect\nimport importlib.util\nimport sys\nimport traceback\nimport types\nfrom typing import Any, Callable, Mapping, List, Dict, TYPE_CHECKING, Optional, TypeVar, Type, Union\n\nimport discord\n\nfrom .core import GroupMixin\nfrom .view import StringView\nfrom .context import Context\nfrom . import errors\nfrom .help import HelpCommand, DefaultHelpCommand\nfrom .cog import Cog\n\nif TYPE_CHECKING:\n import importlib.machinery\n\n from discord.message import Message\n from ._types import (\n Check,\n CoroFunc,\n )\n\n__all__ = (\n 'when_mentioned',\n 'when_mentioned_or',\n 'Bot',\n 'AutoShardedBot',\n)\n\nMISSING: Any = discord.utils.MISSING\n\nT = TypeVar('T')\nCFT = TypeVar('CFT', bound='CoroFunc')\nCXT = TypeVar('CXT', bound='Context')\n\ndef when_mentioned(bot: Union[Bot, AutoShardedBot], msg: Message) -> List[str]:\n \"\"\"A callable that implements a command prefix equivalent to being mentioned.\n\n These are meant to be passed into the :attr:`.Bot.command_prefix` attribute.\n \"\"\"\n # bot.user will never be None when this is called\n return [f'<@{bot.user.id}> ', f'<@!{bot.user.id}> '] # type: ignore\n\ndef when_mentioned_or(*prefixes: str) -> Callable[[Union[Bot, AutoShardedBot], Message], List[str]]:\n \"\"\"A callable that implements when mentioned or other prefixes provided.\n\n These are meant to be passed into the :attr:`.Bot.command_prefix` attribute.\n\n Example\n --------\n\n .. code-block:: python3\n\n bot = commands.Bot(command_prefix=commands.when_mentioned_or('!'))\n\n\n .. note::\n\n This callable returns another callable, so if this is done inside a custom\n callable, you must call the returned callable, for example:\n\n .. 
code-block:: python3\n\n async def get_prefix(bot, message):\n extras = await prefixes_for(message.guild) # returns a list\n return commands.when_mentioned_or(*extras)(bot, message)\n\n\n See Also\n ----------\n :func:`.when_mentioned`\n \"\"\"\n def inner(bot, msg):\n r = list(prefixes)\n r = when_mentioned(bot, msg) + r\n return r\n\n return inner\n\ndef _is_submodule(parent: str, child: str) -> bool:\n return parent == child or child.startswith(parent + \".\")\n\nclass _DefaultRepr:\n def __repr__(self):\n return '<default-help-command>'\n\n_default = _DefaultRepr()\n\nclass BotBase(GroupMixin, discord.cog.CogMixin):\n _supports_prefixed_commands = True\n def __init__(self, command_prefix=when_mentioned, help_command=_default, **options):\n super().__init__(**options)\n self.command_prefix = command_prefix\n self._help_command = None\n self.strip_after_prefix = options.get('strip_after_prefix', False)\n\n if help_command is _default:\n self.help_command = DefaultHelpCommand()\n else:\n self.help_command = help_command\n\n @discord.utils.copy_doc(discord.Client.close)\n async def close(self) -> None:\n for extension in tuple(self.__extensions):\n try:\n self.unload_extension(extension)\n except Exception:\n pass\n\n for cog in tuple(self.__cogs):\n try:\n self.remove_cog(cog)\n except Exception:\n pass\n\n await super().close() # type: ignore\n\n async def on_command_error(self, context: Context, exception: errors.CommandError) -> None:\n \"\"\"|coro|\n\n The default command error handler provided by the bot.\n\n By default this prints to :data:`sys.stderr` however it could be\n overridden to have a different implementation.\n\n This only fires if you do not specify any listeners for command error.\n \"\"\"\n if self.extra_events.get('on_command_error', None):\n return\n\n command = context.command\n if command and command.has_error_handler():\n return\n\n cog = context.cog\n if cog and cog.has_error_handler():\n return\n\n print(f'Ignoring exception in command {context.command}:', file=sys.stderr)\n traceback.print_exception(type(exception), exception, exception.__traceback__, file=sys.stderr)\n\n async def can_run(self, ctx: Context, *, call_once: bool = False) -> bool:\n data = self._check_once if call_once else self._checks\n\n if len(data) == 0:\n return True\n\n # type-checker doesn't distinguish between functions and methods\n return await discord.utils.async_all(f(ctx) for f in data) # type: ignore\n # help command stuff\n\n @property\n def help_command(self) -> Optional[HelpCommand]:\n return self._help_command\n\n @help_command.setter\n def help_command(self, value: Optional[HelpCommand]) -> None:\n if value is not None:\n if not isinstance(value, HelpCommand):\n raise TypeError('help_command must be a subclass of HelpCommand')\n if self._help_command is not None:\n self._help_command._remove_from_bot(self)\n self._help_command = value\n value._add_to_bot(self)\n elif self._help_command is not None:\n self._help_command._remove_from_bot(self)\n self._help_command = None\n else:\n self._help_command = None\n\n # command processing\n\n async def get_prefix(self, message: Message) -> Union[List[str], str]:\n \"\"\"|coro|\n\n Retrieves the prefix the bot is listening to\n with the message as a context.\n\n Parameters\n -----------\n message: :class:`discord.Message`\n The message context to get the prefix of.\n\n Returns\n --------\n Union[List[:class:`str`], :class:`str`]\n A list of prefixes or a single prefix that the bot is\n listening for.\n \"\"\"\n prefix = ret = 
self.command_prefix\n if callable(prefix):\n ret = await discord.utils.maybe_coroutine(prefix, self, message)\n\n if not isinstance(ret, str):\n try:\n ret = list(ret)\n except TypeError:\n # It's possible that a generator raised this exception. Don't\n # replace it with our own error if that's the case.\n if isinstance(ret, collections.abc.Iterable):\n raise\n\n raise TypeError(\"command_prefix must be plain string, iterable of strings, or callable \"\n f\"returning either of these, not {ret.__class__.__name__}\")\n\n if not ret:\n raise ValueError(\"Iterable command_prefix must contain at least one prefix\")\n\n return ret\n\n async def get_context(self, message: Message, *, cls: Type[CXT] = Context) -> CXT:\n r\"\"\"|coro|\n\n Returns the invocation context from the message.\n\n This is a more low-level counter-part for :meth:`.process_commands`\n to allow users more fine grained control over the processing.\n\n The returned context is not guaranteed to be a valid invocation\n context, :attr:`.Context.valid` must be checked to make sure it is.\n If the context is not valid then it is not a valid candidate to be\n invoked under :meth:`~.Bot.invoke`.\n\n Parameters\n -----------\n message: :class:`discord.Message`\n The message to get the invocation context from.\n cls\n The factory class that will be used to create the context.\n By default, this is :class:`.Context`. Should a custom\n class be provided, it must be similar enough to :class:`.Context`\\'s\n interface.\n\n Returns\n --------\n :class:`.Context`\n The invocation context. The type of this can change via the\n ``cls`` parameter.\n \"\"\"\n\n view = StringView(message.content)\n ctx = cls(prefix=None, view=view, bot=self, message=message)\n\n if message.author.id == self.user.id: # type: ignore\n return ctx\n\n prefix = await self.get_prefix(message)\n invoked_prefix = prefix\n\n if isinstance(prefix, str):\n if not view.skip_string(prefix):\n return ctx\n else:\n try:\n # if the context class' __init__ consumes something from the view this\n # will be wrong. 
That seems unreasonable though.\n if message.content.startswith(tuple(prefix)):\n invoked_prefix = discord.utils.find(view.skip_string, prefix)\n else:\n return ctx\n\n except TypeError:\n if not isinstance(prefix, list):\n raise TypeError(\"get_prefix must return either a string or a list of string, \"\n f\"not {prefix.__class__.__name__}\")\n\n # It's possible a bad command_prefix got us here.\n for value in prefix:\n if not isinstance(value, str):\n raise TypeError(\"Iterable command_prefix or list returned from get_prefix must \"\n f\"contain only strings, not {value.__class__.__name__}\")\n\n # Getting here shouldn't happen\n raise\n\n if self.strip_after_prefix:\n view.skip_ws()\n\n invoker = view.get_word()\n ctx.invoked_with = invoker\n # type-checker fails to narrow invoked_prefix type.\n ctx.prefix = invoked_prefix # type: ignore\n ctx.command = self.all_commands.get(invoker)\n return ctx\n\n async def invoke(self, ctx: Context) -> None:\n \"\"\"|coro|\n\n Invokes the command given under the invocation context and\n handles all the internal event dispatch mechanisms.\n\n Parameters\n -----------\n ctx: :class:`.Context`\n The invocation context to invoke.\n \"\"\"\n if ctx.command is not None:\n self.dispatch('command', ctx)\n try:\n if await self.can_run(ctx, call_once=True):\n await ctx.command.invoke(ctx)\n else:\n raise errors.CheckFailure('The global check once functions failed.')\n except errors.CommandError as exc:\n await ctx.command.dispatch_error(ctx, exc)\n else:\n self.dispatch('command_completion', ctx)\n elif ctx.invoked_with:\n exc = errors.CommandNotFound(f'Command \"{ctx.invoked_with}\" is not found')\n self.dispatch('command_error', ctx, exc)\n\n async def process_commands(self, message: Message) -> None:\n \"\"\"|coro|\n\n This function processes the commands that have been registered\n to the bot and other groups. Without this coroutine, none of the\n commands will be triggered.\n\n By default, this coroutine is called inside the :func:`.on_message`\n event. If you choose to override the :func:`.on_message` event, then\n you should invoke this coroutine as well.\n\n This is built using other low level tools, and is equivalent to a\n call to :meth:`~.Bot.get_context` followed by a call to :meth:`~.Bot.invoke`.\n\n This also checks if the message's author is a bot and doesn't\n call :meth:`~.Bot.get_context` or :meth:`~.Bot.invoke` if so.\n\n Parameters\n -----------\n message: :class:`discord.Message`\n The message to process commands for.\n \"\"\"\n if message.author.bot:\n return\n\n ctx = await self.get_context(message)\n await self.invoke(ctx)\n\n async def on_message(self, message):\n await self.process_commands(message)\n\n\nclass Bot(BotBase, discord.Bot):\n \"\"\"Represents a discord bot.\n\n This class is a subclass of :class:`discord.Bot` and as a result\n anything that you can do with a :class:`discord.Bot` you can do with\n this bot.\n\n This class also subclasses :class:`.GroupMixin` to provide the functionality\n to manage commands.\n\n Attributes\n -----------\n command_prefix\n The command prefix is what the message content must contain initially\n to have a command invoked. This prefix could either be a string to\n indicate what the prefix should be, or a callable that takes in the bot\n as its first parameter and :class:`discord.Message` as its second\n parameter and returns the prefix. This is to facilitate \"dynamic\"\n command prefixes. 
This callable can be either a regular function or\n a coroutine.\n\n An empty string as the prefix always matches, enabling prefix-less\n command invocation. While this may be useful in DMs it should be avoided\n in servers, as it's likely to cause performance issues and unintended\n command invocations.\n\n The command prefix could also be an iterable of strings indicating that\n multiple checks for the prefix should be used and the first one to\n match will be the invocation prefix. You can get this prefix via\n :attr:`.Context.prefix`. To avoid confusion empty iterables are not\n allowed.\n\n .. note::\n\n When passing multiple prefixes be careful to not pass a prefix\n that matches a longer prefix occurring later in the sequence. For\n example, if the command prefix is ``('!', '!?')`` the ``'!?'``\n prefix will never be matched to any message as the previous one\n matches messages starting with ``!?``. This is especially important\n when passing an empty string, it should always be last as no prefix\n after it will be matched.\n case_insensitive: :class:`bool`\n Whether the commands should be case insensitive. Defaults to ``False``. This\n attribute does not carry over to groups. You must set it to every group if\n you require group commands to be case insensitive as well.\n help_command: Optional[:class:`.HelpCommand`]\n The help command implementation to use. This can be dynamically\n set at runtime. To remove the help command pass ``None``. For more\n information on implementing a help command, see :ref:`ext_commands_help_command`.\n strip_after_prefix: :class:`bool`\n Whether to strip whitespace characters after encountering the command\n prefix. This allows for ``! hello`` and ``!hello`` to both work if\n the ``command_prefix`` is set to ``!``. Defaults to ``False``.\n\n .. versionadded:: 1.7\n \"\"\"\n pass\n\nclass AutoShardedBot(BotBase, discord.AutoShardedBot):\n \"\"\"This is similar to :class:`.Bot` except that it is inherited from\n :class:`discord.AutoShardedBot` instead.\n \"\"\"\n pass\n",
"path": "discord/ext/commands/bot.py"
}
] | [
{
"content": "\"\"\"\nThe MIT License (MIT)\n\nCopyright (c) 2015-2021 Rapptz\nCopyright (c) 2021-present Pycord Development\n\nPermission is hereby granted, free of charge, to any person obtaining a\ncopy of this software and associated documentation files (the \"Software\"),\nto deal in the Software without restriction, including without limitation\nthe rights to use, copy, modify, merge, publish, distribute, sublicense,\nand/or sell copies of the Software, and to permit persons to whom the\nSoftware is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS\nOR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\nFROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\nDEALINGS IN THE SOFTWARE.\n\"\"\"\n\nfrom __future__ import annotations\n\n\nimport asyncio\nimport collections\nimport collections.abc\nimport inspect\nimport importlib.util\nimport sys\nimport traceback\nimport types\nfrom typing import Any, Callable, Mapping, List, Dict, TYPE_CHECKING, Optional, TypeVar, Type, Union\n\nimport discord\n\nfrom .core import GroupMixin\nfrom .view import StringView\nfrom .context import Context\nfrom . import errors\nfrom .help import HelpCommand, DefaultHelpCommand\nfrom .cog import Cog\n\nif TYPE_CHECKING:\n import importlib.machinery\n\n from discord.message import Message\n from ._types import (\n Check,\n CoroFunc,\n )\n\n__all__ = (\n 'when_mentioned',\n 'when_mentioned_or',\n 'Bot',\n 'AutoShardedBot',\n)\n\nMISSING: Any = discord.utils.MISSING\n\nT = TypeVar('T')\nCFT = TypeVar('CFT', bound='CoroFunc')\nCXT = TypeVar('CXT', bound='Context')\n\ndef when_mentioned(bot: Union[Bot, AutoShardedBot], msg: Message) -> List[str]:\n \"\"\"A callable that implements a command prefix equivalent to being mentioned.\n\n These are meant to be passed into the :attr:`.Bot.command_prefix` attribute.\n \"\"\"\n # bot.user will never be None when this is called\n return [f'<@{bot.user.id}> ', f'<@!{bot.user.id}> '] # type: ignore\n\ndef when_mentioned_or(*prefixes: str) -> Callable[[Union[Bot, AutoShardedBot], Message], List[str]]:\n \"\"\"A callable that implements when mentioned or other prefixes provided.\n\n These are meant to be passed into the :attr:`.Bot.command_prefix` attribute.\n\n Example\n --------\n\n .. code-block:: python3\n\n bot = commands.Bot(command_prefix=commands.when_mentioned_or('!'))\n\n\n .. note::\n\n This callable returns another callable, so if this is done inside a custom\n callable, you must call the returned callable, for example:\n\n .. 
code-block:: python3\n\n async def get_prefix(bot, message):\n extras = await prefixes_for(message.guild) # returns a list\n return commands.when_mentioned_or(*extras)(bot, message)\n\n\n See Also\n ----------\n :func:`.when_mentioned`\n \"\"\"\n def inner(bot, msg):\n r = list(prefixes)\n r = when_mentioned(bot, msg) + r\n return r\n\n return inner\n\ndef _is_submodule(parent: str, child: str) -> bool:\n return parent == child or child.startswith(parent + \".\")\n\nclass _DefaultRepr:\n def __repr__(self):\n return '<default-help-command>'\n\n_default = _DefaultRepr()\n\nclass BotBase(GroupMixin, discord.cog.CogMixin):\n _supports_prefixed_commands = True\n def __init__(self, command_prefix=when_mentioned, help_command=_default, **options):\n super().__init__(**options)\n self.command_prefix = command_prefix\n self._help_command = None\n self.strip_after_prefix = options.get('strip_after_prefix', False)\n\n if help_command is _default:\n self.help_command = DefaultHelpCommand()\n else:\n self.help_command = help_command\n\n @discord.utils.copy_doc(discord.Client.close)\n async def close(self) -> None:\n for extension in tuple(self.__extensions):\n try:\n self.unload_extension(extension)\n except Exception:\n pass\n\n for cog in tuple(self.__cogs):\n try:\n self.remove_cog(cog)\n except Exception:\n pass\n\n await super().close() # type: ignore\n\n async def on_command_error(self, context: Context, exception: errors.CommandError) -> None:\n \"\"\"|coro|\n\n The default command error handler provided by the bot.\n\n By default this prints to :data:`sys.stderr` however it could be\n overridden to have a different implementation.\n\n This only fires if you do not specify any listeners for command error.\n \"\"\"\n if self.extra_events.get('on_command_error', None):\n return\n\n command = context.command\n if command and command.has_error_handler():\n return\n\n cog = context.cog\n if cog and cog.has_error_handler():\n return\n\n print(f'Ignoring exception in command {context.command}:', file=sys.stderr)\n traceback.print_exception(type(exception), exception, exception.__traceback__, file=sys.stderr)\n\n async def can_run(self, ctx: Context, *, call_once: bool = False) -> bool:\n data = self._check_once if call_once else self._checks\n\n if len(data) == 0:\n return True\n\n # type-checker doesn't distinguish between functions and methods\n return await discord.utils.async_all(f(ctx) for f in data) # type: ignore\n # help command stuff\n\n @property\n def help_command(self) -> Optional[HelpCommand]:\n return self._help_command\n\n @help_command.setter\n def help_command(self, value: Optional[HelpCommand]) -> None:\n if value is not None:\n if not isinstance(value, HelpCommand):\n raise TypeError('help_command must be a subclass of HelpCommand')\n if self._help_command is not None:\n self._help_command._remove_from_bot(self)\n self._help_command = value\n value._add_to_bot(self)\n elif self._help_command is not None:\n self._help_command._remove_from_bot(self)\n self._help_command = None\n else:\n self._help_command = None\n\n # command processing\n\n async def get_prefix(self, message: Message) -> Union[List[str], str]:\n \"\"\"|coro|\n\n Retrieves the prefix the bot is listening to\n with the message as a context.\n\n Parameters\n -----------\n message: :class:`discord.Message`\n The message context to get the prefix of.\n\n Returns\n --------\n Union[List[:class:`str`], :class:`str`]\n A list of prefixes or a single prefix that the bot is\n listening for.\n \"\"\"\n prefix = ret = 
self.command_prefix\n if callable(prefix):\n ret = await discord.utils.maybe_coroutine(prefix, self, message)\n\n if not isinstance(ret, str):\n try:\n ret = list(ret)\n except TypeError:\n # It's possible that a generator raised this exception. Don't\n # replace it with our own error if that's the case.\n if isinstance(ret, collections.abc.Iterable):\n raise\n\n raise TypeError(\"command_prefix must be plain string, iterable of strings, or callable \"\n f\"returning either of these, not {ret.__class__.__name__}\")\n\n if not ret:\n raise ValueError(\"Iterable command_prefix must contain at least one prefix\")\n\n return ret\n\n async def get_context(self, message: Message, *, cls: Type[CXT] = Context) -> CXT:\n r\"\"\"|coro|\n\n Returns the invocation context from the message.\n\n This is a more low-level counter-part for :meth:`.process_commands`\n to allow users more fine grained control over the processing.\n\n The returned context is not guaranteed to be a valid invocation\n context, :attr:`.Context.valid` must be checked to make sure it is.\n If the context is not valid then it is not a valid candidate to be\n invoked under :meth:`~.Bot.invoke`.\n\n Parameters\n -----------\n message: :class:`discord.Message`\n The message to get the invocation context from.\n cls\n The factory class that will be used to create the context.\n By default, this is :class:`.Context`. Should a custom\n class be provided, it must be similar enough to :class:`.Context`\\'s\n interface.\n\n Returns\n --------\n :class:`.Context`\n The invocation context. The type of this can change via the\n ``cls`` parameter.\n \"\"\"\n\n view = StringView(message.content)\n ctx = cls(prefix=None, view=view, bot=self, message=message)\n\n if message.author.id == self.user.id: # type: ignore\n return ctx\n\n prefix = await self.get_prefix(message)\n invoked_prefix = prefix\n\n if isinstance(prefix, str):\n if not view.skip_string(prefix):\n return ctx\n else:\n try:\n # if the context class' __init__ consumes something from the view this\n # will be wrong. 
That seems unreasonable though.\n if message.content.startswith(tuple(prefix)):\n invoked_prefix = discord.utils.find(view.skip_string, prefix)\n else:\n return ctx\n\n except TypeError:\n if not isinstance(prefix, list):\n raise TypeError(\"get_prefix must return either a string or a list of string, \"\n f\"not {prefix.__class__.__name__}\")\n\n # It's possible a bad command_prefix got us here.\n for value in prefix:\n if not isinstance(value, str):\n raise TypeError(\"Iterable command_prefix or list returned from get_prefix must \"\n f\"contain only strings, not {value.__class__.__name__}\")\n\n # Getting here shouldn't happen\n raise\n\n if self.strip_after_prefix:\n view.skip_ws()\n\n invoker = view.get_word()\n ctx.invoked_with = invoker\n # type-checker fails to narrow invoked_prefix type.\n ctx.prefix = invoked_prefix # type: ignore\n ctx.command = self.prefixed_commands.get(invoker)\n return ctx\n\n async def invoke(self, ctx: Context) -> None:\n \"\"\"|coro|\n\n Invokes the command given under the invocation context and\n handles all the internal event dispatch mechanisms.\n\n Parameters\n -----------\n ctx: :class:`.Context`\n The invocation context to invoke.\n \"\"\"\n if ctx.command is not None:\n self.dispatch('command', ctx)\n try:\n if await self.can_run(ctx, call_once=True):\n await ctx.command.invoke(ctx)\n else:\n raise errors.CheckFailure('The global check once functions failed.')\n except errors.CommandError as exc:\n await ctx.command.dispatch_error(ctx, exc)\n else:\n self.dispatch('command_completion', ctx)\n elif ctx.invoked_with:\n exc = errors.CommandNotFound(f'Command \"{ctx.invoked_with}\" is not found')\n self.dispatch('command_error', ctx, exc)\n\n async def process_commands(self, message: Message) -> None:\n \"\"\"|coro|\n\n This function processes the commands that have been registered\n to the bot and other groups. Without this coroutine, none of the\n commands will be triggered.\n\n By default, this coroutine is called inside the :func:`.on_message`\n event. If you choose to override the :func:`.on_message` event, then\n you should invoke this coroutine as well.\n\n This is built using other low level tools, and is equivalent to a\n call to :meth:`~.Bot.get_context` followed by a call to :meth:`~.Bot.invoke`.\n\n This also checks if the message's author is a bot and doesn't\n call :meth:`~.Bot.get_context` or :meth:`~.Bot.invoke` if so.\n\n Parameters\n -----------\n message: :class:`discord.Message`\n The message to process commands for.\n \"\"\"\n if message.author.bot:\n return\n\n ctx = await self.get_context(message)\n await self.invoke(ctx)\n\n async def on_message(self, message):\n await self.process_commands(message)\n\n\nclass Bot(BotBase, discord.Bot):\n \"\"\"Represents a discord bot.\n\n This class is a subclass of :class:`discord.Bot` and as a result\n anything that you can do with a :class:`discord.Bot` you can do with\n this bot.\n\n This class also subclasses :class:`.GroupMixin` to provide the functionality\n to manage commands.\n\n Attributes\n -----------\n command_prefix\n The command prefix is what the message content must contain initially\n to have a command invoked. This prefix could either be a string to\n indicate what the prefix should be, or a callable that takes in the bot\n as its first parameter and :class:`discord.Message` as its second\n parameter and returns the prefix. This is to facilitate \"dynamic\"\n command prefixes. 
This callable can be either a regular function or\n a coroutine.\n\n An empty string as the prefix always matches, enabling prefix-less\n command invocation. While this may be useful in DMs it should be avoided\n in servers, as it's likely to cause performance issues and unintended\n command invocations.\n\n The command prefix could also be an iterable of strings indicating that\n multiple checks for the prefix should be used and the first one to\n match will be the invocation prefix. You can get this prefix via\n :attr:`.Context.prefix`. To avoid confusion empty iterables are not\n allowed.\n\n .. note::\n\n When passing multiple prefixes be careful to not pass a prefix\n that matches a longer prefix occurring later in the sequence. For\n example, if the command prefix is ``('!', '!?')`` the ``'!?'``\n prefix will never be matched to any message as the previous one\n matches messages starting with ``!?``. This is especially important\n when passing an empty string, it should always be last as no prefix\n after it will be matched.\n case_insensitive: :class:`bool`\n Whether the commands should be case insensitive. Defaults to ``False``. This\n attribute does not carry over to groups. You must set it to every group if\n you require group commands to be case insensitive as well.\n help_command: Optional[:class:`.HelpCommand`]\n The help command implementation to use. This can be dynamically\n set at runtime. To remove the help command pass ``None``. For more\n information on implementing a help command, see :ref:`ext_commands_help_command`.\n strip_after_prefix: :class:`bool`\n Whether to strip whitespace characters after encountering the command\n prefix. This allows for ``! hello`` and ``!hello`` to both work if\n the ``command_prefix`` is set to ``!``. Defaults to ``False``.\n\n .. versionadded:: 1.7\n \"\"\"\n pass\n\nclass AutoShardedBot(BotBase, discord.AutoShardedBot):\n \"\"\"This is similar to :class:`.Bot` except that it is inherited from\n :class:`discord.AutoShardedBot` instead.\n \"\"\"\n pass\n",
"path": "discord/ext/commands/bot.py"
}
] | diff --git a/discord/ext/commands/bot.py b/discord/ext/commands/bot.py
index 57d5955be8..ed8bc60263 100644
--- a/discord/ext/commands/bot.py
+++ b/discord/ext/commands/bot.py
@@ -314,7 +314,7 @@ class be provided, it must be similar enough to :class:`.Context`\'s
ctx.invoked_with = invoker
# type-checker fails to narrow invoked_prefix type.
ctx.prefix = invoked_prefix # type: ignore
- ctx.command = self.all_commands.get(invoker)
+ ctx.command = self.prefixed_commands.get(invoker)
return ctx
async def invoke(self, ctx: Context) -> None:
| case_insensitive not working properly in 2.0.0b1
### Summary
Commands are only accepted if they are fully lowercase
### Reproduction Steps
Implement a command whose name contains uppercase letters
Add case_insensitive=True
The command is only accessible in full lowercase
Tested the same setup in 2.0.0b1 and 1.7.3:
1.7.3 accepted the command in any case, 2.0.0b1 only accepts lowercase commands
### Minimal Reproducible Code
```python
from discord.ext import commands
comand_prefix = "-"
bot_token = open("Bot_Token.txt").readline()
bot = commands.Bot(command_prefix=comand_prefix, case_insensitive=True)
class a(commands.Cog, name="Cog Name"):
@commands.command()
async def Test(self, ctx):
print("test")
bot.add_cog(a(bot))
bot.run(bot_token)
# -> Accepts "-test" but not "-Test"
```
### Expected Results
Commands are accepted regardless of case
### Actual Results
Commands are only accepted when fully lowercase; otherwise a CommandNotFound error is raised
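To illustrate why the lookup path matters, here is a minimal sketch of a case-insensitive mapping. This is **not** pycord's internal implementation, only an assumption about the mechanism: with `case_insensitive=True` the invoker is expected to be resolved through a mapping that folds case on access, so resolving it through a plain dict loses that behaviour.
```python
# Minimal illustration of a case-insensitive command mapping.
class CaseInsensitiveDict(dict):
    def __setitem__(self, key, value):
        super().__setitem__(key.casefold(), value)

    def __getitem__(self, key):
        return super().__getitem__(key.casefold())

    def get(self, key, default=None):
        return super().get(key.casefold(), default)


commands = CaseInsensitiveDict()
commands["test"] = "handler"

# Lookup succeeds regardless of the casing the user typed, but only if
# the invoker is resolved through this mapping.
assert commands.get("Test") == "handler"
```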
### Intents
None
### System Information
- Python v3.10.0-final
- py-cord v2.0.0-beta
- py-cord pkg_resources: v2.0.0b1
- aiohttp v3.7.4.post0
- system info: Windows 10 10.0.19043
|
django__channels-1886 | [
{
"content": "class BaseMiddleware:\n \"\"\"\n Base class for implementing ASGI middleware. Inherit from this and\n override the setup() method if you want to do things before you\n get to.\n\n Note that subclasses of this are not self-safe; don't store state on\n the instance, as it serves multiple application instances. Instead, use\n scope.\n \"\"\"\n\n def __init__(self, inner):\n \"\"\"\n Middleware constructor - just takes inner application.\n \"\"\"\n self.inner = inner\n\n async def __call__(self, scope, receive, send):\n \"\"\"\n ASGI application; can insert things into the scope and run asynchronous\n code.\n \"\"\"\n # Copy scope to stop changes going upstream\n scope = dict(scope)\n # Run the inner application along with the scope\n return await self.inner(scope, receive, send)\n",
"path": "channels/middleware.py"
}
] | [
{
"content": "class BaseMiddleware:\n \"\"\"\n Base class for implementing ASGI middleware.\n\n Note that subclasses of this are not self-safe; don't store state on\n the instance, as it serves multiple application instances. Instead, use\n scope.\n \"\"\"\n\n def __init__(self, inner):\n \"\"\"\n Middleware constructor - just takes inner application.\n \"\"\"\n self.inner = inner\n\n async def __call__(self, scope, receive, send):\n \"\"\"\n ASGI application; can insert things into the scope and run asynchronous\n code.\n \"\"\"\n # Copy scope to stop changes going upstream\n scope = dict(scope)\n # Run the inner application along with the scope\n return await self.inner(scope, receive, send)\n",
"path": "channels/middleware.py"
}
] | diff --git a/channels/middleware.py b/channels/middleware.py
index 8fcf36531..f3b994246 100644
--- a/channels/middleware.py
+++ b/channels/middleware.py
@@ -1,8 +1,6 @@
class BaseMiddleware:
"""
- Base class for implementing ASGI middleware. Inherit from this and
- override the setup() method if you want to do things before you
- get to.
+ Base class for implementing ASGI middleware.
Note that subclasses of this are not self-safe; don't store state on
the instance, as it serves multiple application instances. Instead, use
| Improve BaseMiddleware class docstring
The class docstring of [BaseMiddleware](https://github.com/django/channels/blob/master/channels/middleware.py#L3) should be updated to explain that `__call__` (and `__init__`) must be overridden by the middleware.
> Base class for implementing ASGI middleware. Inherit from this and
> override the setup() method if you want to do things before you
> get to.
The docstring should also describe the purpose of middlewares: they add additional information to the scope, with a reference to the ASGI specification or the channels documentation (consumers scope section). This would help new users understand what middlewares are for, and what they can and cannot expect from them.
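A short sketch of the pattern the improved docstring could point readers towards: a subclass overrides `__call__`, attaches information to a copy of the scope, and then delegates to the wrapped inner application. The `FakeAuthMiddleware` name and the `user_from_headers` helper are made up for illustration.
```python
from channels.middleware import BaseMiddleware


def user_from_headers(headers):
    # Hypothetical lookup: a real middleware would inspect cookies or
    # headers here and resolve an actual user object.
    return "anonymous"


class FakeAuthMiddleware(BaseMiddleware):
    async def __call__(self, scope, receive, send):
        # Copy the scope so changes don't leak upstream, add information,
        # then hand off to the wrapped inner application.
        scope = dict(scope)
        scope["user"] = user_from_headers(scope.get("headers", []))
        return await super().__call__(scope, receive, send)
```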
|
cocotb__cocotb-745 | [
{
"content": "''' Copyright (c) 2013 Potential Ventures Ltd\nCopyright (c) 2013 SolarFlare Communications Inc\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither the name of Potential Ventures Ltd,\n SolarFlare Communications Inc nor the\n names of its contributors may be used to endorse or promote products\n derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\nDIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. '''\n\n\"\"\"\n A collections of triggers which a testbench can 'yield'\n\"\"\"\nimport os\nimport weakref\n\n# For autodocumentation don't need the extension modules\nif \"SPHINX_BUILD\" in os.environ:\n simulator = None\nelse:\n import simulator\nfrom cocotb.log import SimLog\nfrom cocotb.result import raise_error\nfrom cocotb.utils import get_sim_steps, get_time_from_sim_steps\n\n\nclass TriggerException(Exception):\n pass\n\n\nclass Trigger(object):\n \"\"\"Base class to derive from\"\"\"\n def __init__(self):\n self.log = SimLog(\"cocotb.%s\" % (self.__class__.__name__), id(self))\n self.signal = None\n self.primed = False\n\n def prime(self, *args):\n self.primed = True\n\n def unprime(self):\n \"\"\"Remove any pending callbacks if necessary\"\"\"\n self.primed = False\n\n def __del__(self):\n \"\"\"Ensure if a trigger drops out of scope we remove any pending\n callbacks\"\"\"\n self.unprime()\n\n def __str__(self):\n return self.__class__.__name__\n\n\nclass PythonTrigger(Trigger):\n \"\"\"Python triggers don't use GPI at all\n\n For example notification of coroutine completion etc\n\n TODO:\n Still need to implement unprime\n \"\"\"\n pass\n\n\nclass GPITrigger(Trigger):\n \"\"\"\n Base Trigger class for GPI triggers\n\n Consumes simulation time\n \"\"\"\n def __init__(self):\n Trigger.__init__(self)\n\n # Required to ensure documentation can build\n # if simulator is not None:\n # self.cbhdl = simulator.create_callback(self)\n # else:\n self.cbhdl = 0\n\n def unprime(self):\n \"\"\"Disable a primed trigger, can be reprimed\"\"\"\n if self.cbhdl != 0:\n simulator.deregister_callback(self.cbhdl)\n self.cbhdl = 0\n Trigger.unprime(self)\n\n def __del__(self):\n \"\"\"Remove knowledge of the trigger\"\"\"\n if self.cbhdl != 0:\n self.unprime()\n Trigger.__del__(self)\n\n\nclass Timer(GPITrigger):\n \"\"\"\n Execution will resume when the specified time period expires\n\n Consumes simulation time\n \"\"\"\n def 
__init__(self, time_ps, units=None):\n GPITrigger.__init__(self)\n self.sim_steps = get_sim_steps(time_ps, units)\n\n def prime(self, callback):\n \"\"\"Register for a timed callback\"\"\"\n if self.cbhdl == 0:\n self.cbhdl = simulator.register_timed_callback(self.sim_steps,\n callback, self)\n if self.cbhdl == 0:\n raise_error(self, \"Unable set up %s Trigger\" % (str(self)))\n Trigger.prime(self)\n\n def __str__(self):\n return self.__class__.__name__ + \"(%1.2fps)\" % get_time_from_sim_steps(self.sim_steps,units='ps')\n\nclass _ReadOnly(GPITrigger):\n \"\"\"\n Execution will resume when the readonly portion of the sim cycles is\n readched\n \"\"\"\n def __init__(self):\n GPITrigger.__init__(self)\n\n def prime(self, callback):\n if self.cbhdl == 0:\n self.cbhdl = simulator.register_readonly_callback(callback, self)\n if self.cbhdl == 0:\n raise_error(self, \"Unable set up %s Trigger\" % (str(self)))\n Trigger.prime(self)\n\n def __str__(self):\n return self.__class__.__name__ + \"(readonly)\"\n\n_ro = _ReadOnly()\n\n\ndef ReadOnly():\n return _ro\n\n\nclass _ReadWrite(GPITrigger):\n \"\"\"\n Execution will resume when the readwrite portion of the sim cycles is\n reached\n \"\"\"\n def __init__(self):\n GPITrigger.__init__(self)\n\n def prime(self, callback):\n if self.cbhdl == 0:\n # import pdb\n # pdb.set_trace()\n self.cbhdl = simulator.register_rwsynch_callback(callback, self)\n if self.cbhdl == 0:\n raise_error(self, \"Unable set up %s Trigger\" % (str(self)))\n Trigger.prime(self)\n\n def __str__(self):\n return self.__class__.__name__ + \"(readwritesync)\"\n\n_rw = _ReadWrite()\n\n\ndef ReadWrite():\n return _rw\n\n\nclass _NextTimeStep(GPITrigger):\n \"\"\"\n Execution will resume when the next time step is started\n \"\"\"\n def __init__(self):\n GPITrigger.__init__(self)\n\n def prime(self, callback):\n if self.cbhdl == 0:\n self.cbhdl = simulator.register_nextstep_callback(callback, self)\n if self.cbhdl == 0:\n raise_error(self, \"Unable set up %s Trigger\" % (str(self)))\n Trigger.prime(self)\n\n def __str__(self):\n return self.__class__.__name__ + \"(nexttimestep)\"\n\n_nxts = _NextTimeStep()\n\n\ndef NextTimeStep():\n return _nxts\n\n\nclass _EdgeBase(GPITrigger):\n \"\"\"\n Execution will resume when an edge occurs on the provided signal\n \"\"\"\n @classmethod\n @property\n def _edge_type(self):\n \"\"\"\n The edge type, as understood by the C code. 
Must be set in subclasses\n \"\"\"\n raise NotImplementedError\n\n # Ensure that each signal has at most one edge trigger per edge type.\n # Using a weak dictionary ensures we don't create a reference cycle\n _instances = weakref.WeakValueDictionary()\n\n def __new__(cls, signal):\n # find the existing instance, if possible - else create a new one\n key = (signal, cls._edge_type)\n try:\n return cls._instances[key]\n except KeyError:\n instance = super(_EdgeBase, cls).__new__(cls)\n cls._instances[key] = instance\n return instance\n\n def __init__(self, signal):\n super(_EdgeBase, self).__init__()\n self.signal = signal\n\n def prime(self, callback):\n \"\"\"Register notification of a value change via a callback\"\"\"\n if self.cbhdl == 0:\n self.cbhdl = simulator.register_value_change_callback(\n self.signal._handle, callback, type(self)._edge_type, self\n )\n if self.cbhdl == 0:\n raise_error(self, \"Unable set up %s Trigger\" % (str(self)))\n super(_EdgeBase, self).prime()\n\n def __str__(self):\n return self.__class__.__name__ + \"(%s)\" % self.signal._name\n\n\nclass RisingEdge(_EdgeBase):\n \"\"\" Triggers on the rising edge of the provided signal \"\"\"\n _edge_type = 1\n\n\nclass FallingEdge(_EdgeBase):\n \"\"\" Triggers on the falling edge of the provided signal \"\"\"\n _edge_type = 2\n\n\nclass Edge(_EdgeBase):\n \"\"\" Triggers on either edge in a signal \"\"\"\n _edge_type = 3\n\n\nclass ClockCycles(GPITrigger):\n \"\"\"\n Execution will resume after N rising edges or N falling edges\n \"\"\"\n def __init__(self, signal, num_cycles, rising=True):\n super(ClockCycles, self).__init__()\n self.signal = signal\n self.num_cycles = num_cycles\n if rising is True:\n self._rising = 1\n else:\n self._rising = 2\n\n def prime(self, callback):\n self._callback = callback\n\n def _check(obj):\n self.unprime()\n\n if self.signal.value:\n self.num_cycles -= 1\n\n if self.num_cycles <= 0:\n self._callback(self)\n return\n\n self.cbhdl = simulator.register_value_change_callback(self.signal.\n _handle,\n _check,\n self._rising,\n self)\n if self.cbhdl == 0:\n raise_error(self, \"Unable set up %s Trigger\" % (str(self)))\n\n self.cbhdl = simulator.register_value_change_callback(self.signal.\n _handle,\n _check,\n self._rising,\n self)\n if self.cbhdl == 0:\n raise_error(self, \"Unable set up %s Trigger\" % (str(self)))\n Trigger.prime(self)\n\n def __str__(self):\n return self.__class__.__name__ + \"(%s)\" % self.signal._name\n\n\nclass Combine(PythonTrigger):\n \"\"\"\n Combines multiple triggers together. Coroutine will continue when all\n triggers have fired\n \"\"\"\n\n def __init__(self, *args):\n PythonTrigger.__init__(self)\n self._triggers = args\n # TODO: check that trigger is an iterable containing\n # only Trigger objects\n try:\n for trigger in self._triggers:\n if not isinstance(trigger, Trigger):\n raise TriggerException(\"All combined triggers must be \"\n \"instances of Trigger! 
Got: %s\" %\n trigger.__class__.__name__)\n except Exception:\n raise TriggerException(\"%s requires a list of Trigger objects\" %\n self.__class__.__name__)\n\n def prime(self, callback):\n self._callback = callback\n self._fired = []\n for trigger in self._triggers:\n trigger.prime(self._check_all_fired)\n Trigger.prime(self)\n\n def _check_all_fired(self, trigger):\n self._fired.append(trigger)\n if self._fired == self._triggers:\n self._callback(self)\n\n def unprime(self):\n for trigger in self._triggers:\n trigger.unprime()\n\n\nclass _Event(PythonTrigger):\n \"\"\"\n Unique instance used by the Event object.\n\n One created for each attempt to wait on the event so that the scheduler\n can maintain a dictionary of indexing each individual coroutine\n\n FIXME: This will leak - need to use peers to ensure everything is removed\n \"\"\"\n def __init__(self, parent):\n PythonTrigger.__init__(self)\n self.parent = parent\n\n def prime(self, callback):\n self._callback = callback\n self.parent.prime(callback, self)\n Trigger.prime(self)\n\n def __call__(self):\n self._callback(self)\n\n\nclass Event(PythonTrigger):\n \"\"\"\n Event to permit synchronisation between two coroutines\n \"\"\"\n def __init__(self, name=\"\"):\n PythonTrigger.__init__(self)\n self._pending = []\n self.name = name\n self.fired = False\n self.data = None\n\n def prime(self, callback, trigger):\n self._pending.append(trigger)\n Trigger.prime(self)\n\n def set(self, data=None):\n \"\"\"Wake up any coroutines blocked on this event\"\"\"\n self.fired = True\n self.data = data\n\n p = self._pending[:]\n\n self._pending = []\n\n for trigger in p:\n trigger()\n\n def wait(self):\n \"\"\"This can be yielded to block this coroutine\n until another wakes it\"\"\"\n return _Event(self)\n\n def clear(self):\n \"\"\"Clear this event that's fired.\n\n Subsequent calls to wait will block until set() is called again\"\"\"\n self.fired = False\n\n def __str__(self):\n return self.__class__.__name__ + \"(%s)\" % self.name\n\n\nclass _Lock(PythonTrigger):\n \"\"\"\n Unique instance used by the Lock object.\n\n One created for each attempt to acquire the Lock so that the scheduler\n can maintain a dictionary of indexing each individual coroutine\n\n FIXME: This will leak - need to use peers to ensure everything is removed\n \"\"\"\n def __init__(self, parent):\n PythonTrigger.__init__(self)\n self.parent = parent\n\n def prime(self, callback):\n self._callback = callback\n self.parent.prime(callback, self)\n Trigger.prime(self)\n\n def __call__(self):\n self._callback(self)\n\n\nclass Lock(PythonTrigger):\n \"\"\"\n Lock primitive (not re-entrant)\n \"\"\"\n\n def __init__(self, name=\"\"):\n PythonTrigger.__init__(self)\n self._pending_unprimed = []\n self._pending_primed = []\n self.name = name\n self.locked = False\n\n def prime(self, callback, trigger):\n Trigger.prime(self)\n\n self._pending_unprimed.remove(trigger)\n\n if not self.locked:\n self.locked = True\n callback(trigger)\n else:\n self._pending_primed.append(trigger)\n\n def acquire(self):\n \"\"\"This can be yielded to block until the lock is acquired\"\"\"\n trig = _Lock(self)\n self._pending_unprimed.append(trig)\n return trig\n\n def release(self):\n\n if not self.locked:\n raise_error(self, \"Attempt to release an unacquired Lock %s\" %\n (str(self)))\n\n self.locked = False\n\n # nobody waiting for this lock\n if not self._pending_primed:\n return\n\n trigger = self._pending_primed.pop(0)\n self.locked = True\n trigger()\n\n def __str__(self):\n return \"%s(%s) 
[%s waiting]\" % (str(self.__class__.__name__),\n self.name,\n len(self._pending_primed))\n\n def __nonzero__(self):\n \"\"\"Provide boolean of a Lock\"\"\"\n return self.locked\n\n __bool__ = __nonzero__\n\n\nclass NullTrigger(Trigger):\n \"\"\"\n Trigger for internal interfacing use call the callback as soon\n as it is primed and then remove it's self from the scheduler\n \"\"\"\n def __init__(self, name=\"\"):\n Trigger.__init__(self)\n self._callback = None\n self.name = name\n\n def prime(self, callback):\n callback(self)\n\n\nclass Join(PythonTrigger):\n \"\"\"\n Join a coroutine, firing when it exits\n \"\"\"\n # Ensure that each coroutine has at most one join trigger.\n # Using a weak dictionary ensures we don't create a reference cycle\n _instances = weakref.WeakValueDictionary()\n\n def __new__(cls, coroutine):\n # find the existing instance, if possible - else create a new one\n try:\n return cls._instances[coroutine]\n except KeyError:\n instance = super(Join, cls).__new__(cls)\n cls._instances[coroutine] = instance\n return instance\n\n def __init__(self, coroutine):\n super(Join, self).__init__()\n self._coroutine = coroutine\n self.pass_retval = True\n\n @property\n def retval(self):\n return self._coroutine.retval\n\n def prime(self, callback):\n if self._coroutine._finished:\n callback(self)\n else:\n super(Join, self).prime(callback)\n\n def __str__(self):\n return self.__class__.__name__ + \"(%s)\" % self._coroutine.__name__\n",
"path": "cocotb/triggers.py"
}
] | [
{
"content": "''' Copyright (c) 2013 Potential Ventures Ltd\nCopyright (c) 2013 SolarFlare Communications Inc\nAll rights reserved.\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n * Redistributions of source code must retain the above copyright\n notice, this list of conditions and the following disclaimer.\n * Redistributions in binary form must reproduce the above copyright\n notice, this list of conditions and the following disclaimer in the\n documentation and/or other materials provided with the distribution.\n * Neither the name of Potential Ventures Ltd,\n SolarFlare Communications Inc nor the\n names of its contributors may be used to endorse or promote products\n derived from this software without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\nANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\nWARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\nDISCLAIMED. IN NO EVENT SHALL POTENTIAL VENTURES LTD BE LIABLE FOR ANY\nDIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\nLOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\nON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\nSOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. '''\n\n\"\"\"\n A collections of triggers which a testbench can 'yield'\n\"\"\"\nimport os\nimport weakref\n\n# For autodocumentation don't need the extension modules\nif \"SPHINX_BUILD\" in os.environ:\n simulator = None\nelse:\n import simulator\nfrom cocotb.log import SimLog\nfrom cocotb.result import raise_error\nfrom cocotb.utils import get_sim_steps, get_time_from_sim_steps\n\n\nclass TriggerException(Exception):\n pass\n\n\nclass Trigger(object):\n \"\"\"Base class to derive from\"\"\"\n def __init__(self):\n self.log = SimLog(\"cocotb.%s\" % (self.__class__.__name__), id(self))\n self.signal = None\n self.primed = False\n\n def prime(self, *args):\n self.primed = True\n\n def unprime(self):\n \"\"\"Remove any pending callbacks if necessary\"\"\"\n self.primed = False\n\n def __del__(self):\n \"\"\"Ensure if a trigger drops out of scope we remove any pending\n callbacks\"\"\"\n self.unprime()\n\n def __str__(self):\n return self.__class__.__name__\n\n\nclass PythonTrigger(Trigger):\n \"\"\"Python triggers don't use GPI at all\n\n For example notification of coroutine completion etc\n\n TODO:\n Still need to implement unprime\n \"\"\"\n pass\n\n\nclass GPITrigger(Trigger):\n \"\"\"\n Base Trigger class for GPI triggers\n\n Consumes simulation time\n \"\"\"\n def __init__(self):\n Trigger.__init__(self)\n\n # Required to ensure documentation can build\n # if simulator is not None:\n # self.cbhdl = simulator.create_callback(self)\n # else:\n self.cbhdl = 0\n\n def unprime(self):\n \"\"\"Disable a primed trigger, can be reprimed\"\"\"\n if self.cbhdl != 0:\n simulator.deregister_callback(self.cbhdl)\n self.cbhdl = 0\n Trigger.unprime(self)\n\n def __del__(self):\n \"\"\"Remove knowledge of the trigger\"\"\"\n if self.cbhdl != 0:\n self.unprime()\n Trigger.__del__(self)\n\n\nclass Timer(GPITrigger):\n \"\"\"\n Execution will resume when the specified time period expires\n\n Consumes simulation time\n \"\"\"\n def 
__init__(self, time_ps, units=None):\n GPITrigger.__init__(self)\n self.sim_steps = get_sim_steps(time_ps, units)\n\n def prime(self, callback):\n \"\"\"Register for a timed callback\"\"\"\n if self.cbhdl == 0:\n self.cbhdl = simulator.register_timed_callback(self.sim_steps,\n callback, self)\n if self.cbhdl == 0:\n raise_error(self, \"Unable set up %s Trigger\" % (str(self)))\n Trigger.prime(self)\n\n def __str__(self):\n return self.__class__.__name__ + \"(%1.2fps)\" % get_time_from_sim_steps(self.sim_steps,units='ps')\n\nclass _ReadOnly(GPITrigger):\n \"\"\"\n Execution will resume when the readonly portion of the sim cycles is\n readched\n \"\"\"\n def __init__(self):\n GPITrigger.__init__(self)\n\n def prime(self, callback):\n if self.cbhdl == 0:\n self.cbhdl = simulator.register_readonly_callback(callback, self)\n if self.cbhdl == 0:\n raise_error(self, \"Unable set up %s Trigger\" % (str(self)))\n Trigger.prime(self)\n\n def __str__(self):\n return self.__class__.__name__ + \"(readonly)\"\n\n_ro = _ReadOnly()\n\n\ndef ReadOnly():\n return _ro\n\n\nclass _ReadWrite(GPITrigger):\n \"\"\"\n Execution will resume when the readwrite portion of the sim cycles is\n reached\n \"\"\"\n def __init__(self):\n GPITrigger.__init__(self)\n\n def prime(self, callback):\n if self.cbhdl == 0:\n # import pdb\n # pdb.set_trace()\n self.cbhdl = simulator.register_rwsynch_callback(callback, self)\n if self.cbhdl == 0:\n raise_error(self, \"Unable set up %s Trigger\" % (str(self)))\n Trigger.prime(self)\n\n def __str__(self):\n return self.__class__.__name__ + \"(readwritesync)\"\n\n_rw = _ReadWrite()\n\n\ndef ReadWrite():\n return _rw\n\n\nclass _NextTimeStep(GPITrigger):\n \"\"\"\n Execution will resume when the next time step is started\n \"\"\"\n def __init__(self):\n GPITrigger.__init__(self)\n\n def prime(self, callback):\n if self.cbhdl == 0:\n self.cbhdl = simulator.register_nextstep_callback(callback, self)\n if self.cbhdl == 0:\n raise_error(self, \"Unable set up %s Trigger\" % (str(self)))\n Trigger.prime(self)\n\n def __str__(self):\n return self.__class__.__name__ + \"(nexttimestep)\"\n\n_nxts = _NextTimeStep()\n\n\ndef NextTimeStep():\n return _nxts\n\n\nclass _EdgeBase(GPITrigger):\n \"\"\"\n Execution will resume when an edge occurs on the provided signal\n \"\"\"\n @classmethod\n @property\n def _edge_type(self):\n \"\"\"\n The edge type, as understood by the C code. 
Must be set in subclasses\n \"\"\"\n raise NotImplementedError\n\n # Ensure that each signal has at most one edge trigger per edge type.\n # Using a weak dictionary ensures we don't create a reference cycle\n _instances = weakref.WeakValueDictionary()\n\n def __new__(cls, signal):\n # find the existing instance, if possible - else create a new one\n key = (signal, cls._edge_type)\n try:\n return cls._instances[key]\n except KeyError:\n instance = super(_EdgeBase, cls).__new__(cls)\n cls._instances[key] = instance\n return instance\n\n def __init__(self, signal):\n super(_EdgeBase, self).__init__()\n self.signal = signal\n\n def prime(self, callback):\n \"\"\"Register notification of a value change via a callback\"\"\"\n if self.cbhdl == 0:\n self.cbhdl = simulator.register_value_change_callback(\n self.signal._handle, callback, type(self)._edge_type, self\n )\n if self.cbhdl == 0:\n raise_error(self, \"Unable set up %s Trigger\" % (str(self)))\n super(_EdgeBase, self).prime()\n\n def __str__(self):\n return self.__class__.__name__ + \"(%s)\" % self.signal._name\n\n\nclass RisingEdge(_EdgeBase):\n \"\"\" Triggers on the rising edge of the provided signal \"\"\"\n _edge_type = 1\n\n\nclass FallingEdge(_EdgeBase):\n \"\"\" Triggers on the falling edge of the provided signal \"\"\"\n _edge_type = 2\n\n\nclass Edge(_EdgeBase):\n \"\"\" Triggers on either edge in a signal \"\"\"\n _edge_type = 3\n\n\nclass ClockCycles(GPITrigger):\n \"\"\"\n Execution will resume after N rising edges or N falling edges\n \"\"\"\n def __init__(self, signal, num_cycles, rising=True):\n super(ClockCycles, self).__init__()\n self.signal = signal\n self.num_cycles = num_cycles\n if rising is True:\n self._rising = 1\n else:\n self._rising = 2\n\n def prime(self, callback):\n self._callback = callback\n\n def _check(obj):\n self.unprime()\n\n if self.signal.value:\n self.num_cycles -= 1\n\n if self.num_cycles <= 0:\n self._callback(self)\n return\n\n self.cbhdl = simulator.register_value_change_callback(self.signal.\n _handle,\n _check,\n self._rising,\n self)\n if self.cbhdl == 0:\n raise_error(self, \"Unable set up %s Trigger\" % (str(self)))\n\n self.cbhdl = simulator.register_value_change_callback(self.signal.\n _handle,\n _check,\n self._rising,\n self)\n if self.cbhdl == 0:\n raise_error(self, \"Unable set up %s Trigger\" % (str(self)))\n Trigger.prime(self)\n\n def __str__(self):\n return self.__class__.__name__ + \"(%s)\" % self.signal._name\n\n\nclass Combine(PythonTrigger):\n \"\"\"\n Combines multiple triggers together. Coroutine will continue when all\n triggers have fired\n \"\"\"\n\n def __init__(self, *args):\n PythonTrigger.__init__(self)\n self._triggers = args\n # TODO: check that trigger is an iterable containing\n # only Trigger objects\n try:\n for trigger in self._triggers:\n if not isinstance(trigger, Trigger):\n raise TriggerException(\"All combined triggers must be \"\n \"instances of Trigger! 
Got: %s\" %\n trigger.__class__.__name__)\n except Exception:\n raise TriggerException(\"%s requires a list of Trigger objects\" %\n self.__class__.__name__)\n\n def prime(self, callback):\n self._callback = callback\n self._fired = []\n for trigger in self._triggers:\n trigger.prime(self._check_all_fired)\n Trigger.prime(self)\n\n def _check_all_fired(self, trigger):\n self._fired.append(trigger)\n if self._fired == self._triggers:\n self._callback(self)\n\n def unprime(self):\n for trigger in self._triggers:\n trigger.unprime()\n\n\nclass _Event(PythonTrigger):\n \"\"\"\n Unique instance used by the Event object.\n\n One created for each attempt to wait on the event so that the scheduler\n can maintain a dictionary of indexing each individual coroutine\n\n FIXME: This will leak - need to use peers to ensure everything is removed\n \"\"\"\n def __init__(self, parent):\n PythonTrigger.__init__(self)\n self.parent = parent\n\n def prime(self, callback):\n self._callback = callback\n self.parent.prime(callback, self)\n Trigger.prime(self)\n\n def __call__(self):\n self._callback(self)\n\n\nclass Event(PythonTrigger):\n \"\"\"\n Event to permit synchronisation between two coroutines\n \"\"\"\n def __init__(self, name=\"\"):\n PythonTrigger.__init__(self)\n self._pending = []\n self.name = name\n self.fired = False\n self.data = None\n\n def prime(self, callback, trigger):\n self._pending.append(trigger)\n Trigger.prime(self)\n\n def set(self, data=None):\n \"\"\"Wake up any coroutines blocked on this event\"\"\"\n self.fired = True\n self.data = data\n\n p = self._pending[:]\n\n self._pending = []\n\n for trigger in p:\n trigger()\n\n def wait(self):\n \"\"\"This can be yielded to block this coroutine\n until another wakes it\n\n If the Event has already been fired, this returns NullTrigger()\n To reset the event (and enable the use of wait() again), clear() should be called\n \"\"\"\n if self.fired:\n return NullTrigger()\n return _Event(self)\n\n def clear(self):\n \"\"\"Clear this event that's fired.\n\n Subsequent calls to wait will block until set() is called again\"\"\"\n self.fired = False\n\n def __str__(self):\n return self.__class__.__name__ + \"(%s)\" % self.name\n\n\nclass _Lock(PythonTrigger):\n \"\"\"\n Unique instance used by the Lock object.\n\n One created for each attempt to acquire the Lock so that the scheduler\n can maintain a dictionary of indexing each individual coroutine\n\n FIXME: This will leak - need to use peers to ensure everything is removed\n \"\"\"\n def __init__(self, parent):\n PythonTrigger.__init__(self)\n self.parent = parent\n\n def prime(self, callback):\n self._callback = callback\n self.parent.prime(callback, self)\n Trigger.prime(self)\n\n def __call__(self):\n self._callback(self)\n\n\nclass Lock(PythonTrigger):\n \"\"\"\n Lock primitive (not re-entrant)\n \"\"\"\n\n def __init__(self, name=\"\"):\n PythonTrigger.__init__(self)\n self._pending_unprimed = []\n self._pending_primed = []\n self.name = name\n self.locked = False\n\n def prime(self, callback, trigger):\n Trigger.prime(self)\n\n self._pending_unprimed.remove(trigger)\n\n if not self.locked:\n self.locked = True\n callback(trigger)\n else:\n self._pending_primed.append(trigger)\n\n def acquire(self):\n \"\"\"This can be yielded to block until the lock is acquired\"\"\"\n trig = _Lock(self)\n self._pending_unprimed.append(trig)\n return trig\n\n def release(self):\n\n if not self.locked:\n raise_error(self, \"Attempt to release an unacquired Lock %s\" %\n (str(self)))\n\n self.locked = 
False\n\n # nobody waiting for this lock\n if not self._pending_primed:\n return\n\n trigger = self._pending_primed.pop(0)\n self.locked = True\n trigger()\n\n def __str__(self):\n return \"%s(%s) [%s waiting]\" % (str(self.__class__.__name__),\n self.name,\n len(self._pending_primed))\n\n def __nonzero__(self):\n \"\"\"Provide boolean of a Lock\"\"\"\n return self.locked\n\n __bool__ = __nonzero__\n\n\nclass NullTrigger(Trigger):\n \"\"\"\n Trigger for internal interfacing use call the callback as soon\n as it is primed and then remove it's self from the scheduler\n \"\"\"\n def __init__(self, name=\"\"):\n Trigger.__init__(self)\n self._callback = None\n self.name = name\n\n def prime(self, callback):\n callback(self)\n\n\nclass Join(PythonTrigger):\n \"\"\"\n Join a coroutine, firing when it exits\n \"\"\"\n # Ensure that each coroutine has at most one join trigger.\n # Using a weak dictionary ensures we don't create a reference cycle\n _instances = weakref.WeakValueDictionary()\n\n def __new__(cls, coroutine):\n # find the existing instance, if possible - else create a new one\n try:\n return cls._instances[coroutine]\n except KeyError:\n instance = super(Join, cls).__new__(cls)\n cls._instances[coroutine] = instance\n return instance\n\n def __init__(self, coroutine):\n super(Join, self).__init__()\n self._coroutine = coroutine\n self.pass_retval = True\n\n @property\n def retval(self):\n return self._coroutine.retval\n\n def prime(self, callback):\n if self._coroutine._finished:\n callback(self)\n else:\n super(Join, self).prime(callback)\n\n def __str__(self):\n return self.__class__.__name__ + \"(%s)\" % self._coroutine.__name__\n",
"path": "cocotb/triggers.py"
}
] | diff --git a/cocotb/triggers.py b/cocotb/triggers.py
index ad59ce7a11..ec69f9a1d0 100644
--- a/cocotb/triggers.py
+++ b/cocotb/triggers.py
@@ -401,7 +401,13 @@ def set(self, data=None):
def wait(self):
"""This can be yielded to block this coroutine
- until another wakes it"""
+ until another wakes it
+
+ If the Event has already been fired, this returns NullTrigger()
+ To reset the event (and enable the use of wait() again), clear() should be called
+ """
+ if self.fired:
+ return NullTrigger()
return _Event(self)
def clear(self):
| Waiting on an event that has already fired will hang forever
We just need to check whether the event has already fired and, if it has, return a `NullTrigger()`
[Need to modify this function](https://github.com/potentialventures/cocotb/blob/0bb751d5bb80f75e7a03284284f0d46caa209ee4/cocotb/triggers.py#L402)
```python
def wait(self):
"""This can be yielded to block this coroutine
until another wakes it"""
+ if self.fired:
+ return NullTrigger()
+
return _Event(self)
```
Originally reported by @stuarthodgson
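
As a side note, the behaviour is easy to model outside the simulator. The sketch below is a simplified standalone mock (it does not import cocotb; the class names mirror `Event`, `_Event` and `NullTrigger`, but the trigger machinery is reduced to the bare minimum) showing why the `fired` short-circuit lets a late waiter resume immediately instead of hanging:
```python
# Standalone sketch of the patched behaviour -- no simulator required.
# NullTrigger fires its callback as soon as it is primed, so a coroutine
# yielding event.wait() after set() is rescheduled immediately.

class NullTrigger:
    def prime(self, callback):
        callback(self)                  # fire straight away

class _Event:
    """Stand-in for the per-waiter trigger; in real cocotb it fires on set()."""
    def __init__(self, parent):
        self.parent = parent
    def prime(self, callback):
        pass                            # would register with the parent Event

class Event:
    def __init__(self):
        self.fired = False
    def set(self, data=None):
        self.fired = True
    def wait(self):
        if self.fired:                  # the fix: short-circuit if already fired
            return NullTrigger()
        return _Event(self)             # otherwise block until set() is called

evt = Event()
evt.set()
evt.wait().prime(lambda trig: print("resumed via", type(trig).__name__))
# prints: resumed via NullTrigger
```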
|
e-valuation__EvaP-728 | [
{
"content": "from django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.db import transaction\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import ugettext as _\n\nfrom evap.evaluation.auth import participant_required\nfrom evap.evaluation.models import Course, Semester\nfrom evap.evaluation.tools import STUDENT_STATES_ORDERED\n\nfrom evap.student.forms import QuestionsForm\nfrom evap.student.tools import make_form_identifier\n\nfrom collections import OrderedDict\n\n@participant_required\ndef index(request):\n # retrieve all courses, where the user is a participant and that are not new\n courses = list(set(Course.objects.filter(participants=request.user).exclude(state=\"new\")))\n voted_courses = list(set(Course.objects.filter(voters=request.user)))\n due_courses = list(set(Course.objects.filter(participants=request.user, state='inEvaluation').exclude(voters=request.user)))\n\n sorter = lambda course: (list(STUDENT_STATES_ORDERED.keys()).index(course.student_state), course.vote_end_date, course.name)\n courses.sort(key=sorter)\n\n semesters = Semester.objects.all()\n semester_list = [dict(semester_name=semester.name, id=semester.id, courses=[course for course in courses if course.semester_id == semester.id]) for semester in semesters]\n\n template_data = dict(\n semester_list=semester_list,\n voted_courses=voted_courses,\n due_courses=due_courses,\n can_download_grades=request.user.can_download_grades,\n )\n return render(request, \"student_index.html\", template_data)\n\n\ndef vote_preview(request, course):\n \"\"\"\n Renders a preview of the voting page for the given course.\n Not used by the student app itself, but by staff and contributor.\n \"\"\"\n form_groups = helper_create_voting_form_groups(request, course.contributions.all())\n course_form_group = form_groups.pop(course.general_contribution)\n contributor_form_groups = list((contribution.contributor, contribution.label, form_group, False) for contribution, form_group in form_groups.items())\n\n template_data = dict(\n errors_exist=False,\n course_form_group=course_form_group,\n contributor_form_groups=contributor_form_groups,\n course=course,\n preview=True)\n return render(request, \"student_vote.html\", template_data)\n\n\n@participant_required\ndef vote(request, course_id):\n # retrieve course and make sure that the user is allowed to vote\n course = get_object_or_404(Course, id=course_id)\n if not course.can_user_vote(request.user):\n raise PermissionDenied\n\n # prevent a user from voting on themselves.\n contributions_to_vote_on = course.contributions.exclude(contributor=request.user).all()\n form_groups = helper_create_voting_form_groups(request, contributions_to_vote_on)\n\n if not all(all(form.is_valid() for form in form_group) for form_group in form_groups.values()):\n errors_exist = any(helper_has_errors(form_group) for form_group in form_groups.values())\n\n course_form_group = form_groups.pop(course.general_contribution)\n\n contributor_form_groups = list((contribution.contributor, contribution.label, form_group, helper_has_errors(form_group)) for contribution, form_group in form_groups.items())\n\n template_data = dict(\n errors_exist=errors_exist,\n course_form_group=course_form_group,\n contributor_form_groups=contributor_form_groups,\n course=course,\n preview=False)\n return render(request, \"student_vote.html\", template_data)\n\n # all forms are valid, begin vote operation\n with transaction.atomic():\n for 
contribution, form_group in form_groups.items():\n for questionnaire_form in form_group:\n questionnaire = questionnaire_form.questionnaire\n for question in questionnaire.question_set.all():\n identifier = make_form_identifier(contribution, questionnaire, question)\n value = questionnaire_form.cleaned_data.get(identifier)\n\n if question.is_text_question:\n if value:\n question.answer_class.objects.create(\n contribution=contribution,\n question=question,\n answer=value)\n else:\n if value != 6:\n answer_counter, created = question.answer_class.objects.get_or_create(contribution=contribution, question=question, answer=value)\n answer_counter.add_vote()\n answer_counter.save()\n\n # remember that the user voted already\n course.voters.add(request.user)\n\n course.was_evaluated(request)\n\n messages.success(request, _(\"Your vote was recorded.\"))\n return redirect('student:index')\n\n\ndef helper_create_form_group(request, contribution):\n return list(QuestionsForm(request.POST or None, contribution=contribution, questionnaire=questionnaire) for questionnaire in contribution.questionnaires.all())\n\ndef helper_create_voting_form_groups(request, contributions):\n form_groups = OrderedDict()\n for contribution in contributions:\n form_groups[contribution] = helper_create_form_group(request, contribution)\n return form_groups\n\ndef helper_has_errors(form_group):\n return any(form.errors for form in form_group)\n",
"path": "evap/student/views.py"
}
] | [
{
"content": "from django.contrib import messages\nfrom django.core.exceptions import PermissionDenied\nfrom django.db import transaction\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.utils.translation import ugettext as _\n\nfrom evap.evaluation.auth import participant_required\nfrom evap.evaluation.models import Course, Semester\nfrom evap.evaluation.tools import STUDENT_STATES_ORDERED\n\nfrom evap.student.forms import QuestionsForm\nfrom evap.student.tools import make_form_identifier\n\nfrom collections import OrderedDict\n\n@participant_required\ndef index(request):\n # retrieve all courses, where the user is a participant and that are not new\n courses = list(set(Course.objects.filter(participants=request.user).exclude(state=\"new\")))\n voted_courses = list(set(Course.objects.filter(voters=request.user)))\n due_courses = list(set(Course.objects.filter(participants=request.user, state='inEvaluation').exclude(voters=request.user)))\n\n sorter = lambda course: (list(STUDENT_STATES_ORDERED.keys()).index(course.student_state), course.vote_end_date, course.name)\n courses.sort(key=sorter)\n\n semesters = Semester.objects.all()\n semester_list = [dict(semester_name=semester.name, id=semester.id, courses=[course for course in courses if course.semester_id == semester.id]) for semester in semesters]\n\n template_data = dict(\n semester_list=semester_list,\n voted_courses=voted_courses,\n due_courses=due_courses,\n can_download_grades=request.user.can_download_grades,\n )\n return render(request, \"student_index.html\", template_data)\n\n\ndef vote_preview(request, course):\n \"\"\"\n Renders a preview of the voting page for the given course.\n Not used by the student app itself, but by staff and contributor.\n \"\"\"\n form_groups = helper_create_voting_form_groups(request, course.contributions.all())\n course_form_group = form_groups.pop(course.general_contribution)\n contributor_form_groups = list((contribution.contributor, contribution.label, form_group, False) for contribution, form_group in form_groups.items())\n\n template_data = dict(\n errors_exist=False,\n course_form_group=course_form_group,\n contributor_form_groups=contributor_form_groups,\n course=course,\n preview=True)\n return render(request, \"student_vote.html\", template_data)\n\n\n@participant_required\ndef vote(request, course_id):\n # retrieve course and make sure that the user is allowed to vote\n course = get_object_or_404(Course, id=course_id)\n if not course.can_user_vote(request.user):\n raise PermissionDenied\n\n # prevent a user from voting on themselves.\n contributions_to_vote_on = course.contributions.exclude(contributor=request.user).all()\n form_groups = helper_create_voting_form_groups(request, contributions_to_vote_on)\n\n if not all(all(form.is_valid() for form in form_group) for form_group in form_groups.values()):\n errors_exist = any(helper_has_errors(form_group) for form_group in form_groups.values())\n\n course_form_group = form_groups.pop(course.general_contribution)\n\n contributor_form_groups = list((contribution.contributor, contribution.label, form_group, helper_has_errors(form_group)) for contribution, form_group in form_groups.items())\n\n template_data = dict(\n errors_exist=errors_exist,\n course_form_group=course_form_group,\n contributor_form_groups=contributor_form_groups,\n course=course,\n participants_warning=course.num_participants <= 5,\n preview=False)\n return render(request, \"student_vote.html\", template_data)\n\n # all forms are valid, begin vote 
operation\n with transaction.atomic():\n for contribution, form_group in form_groups.items():\n for questionnaire_form in form_group:\n questionnaire = questionnaire_form.questionnaire\n for question in questionnaire.question_set.all():\n identifier = make_form_identifier(contribution, questionnaire, question)\n value = questionnaire_form.cleaned_data.get(identifier)\n\n if question.is_text_question:\n if value:\n question.answer_class.objects.create(\n contribution=contribution,\n question=question,\n answer=value)\n else:\n if value != 6:\n answer_counter, created = question.answer_class.objects.get_or_create(contribution=contribution, question=question, answer=value)\n answer_counter.add_vote()\n answer_counter.save()\n\n # remember that the user voted already\n course.voters.add(request.user)\n\n course.was_evaluated(request)\n\n messages.success(request, _(\"Your vote was recorded.\"))\n return redirect('student:index')\n\n\ndef helper_create_form_group(request, contribution):\n return list(QuestionsForm(request.POST or None, contribution=contribution, questionnaire=questionnaire) for questionnaire in contribution.questionnaires.all())\n\ndef helper_create_voting_form_groups(request, contributions):\n form_groups = OrderedDict()\n for contribution in contributions:\n form_groups[contribution] = helper_create_form_group(request, contribution)\n return form_groups\n\ndef helper_has_errors(form_group):\n return any(form.errors for form in form_group)\n",
"path": "evap/student/views.py"
}
] | diff --git a/evap/static/css/evap.css b/evap/static/css/evap.css
index 7a10b6fb7e..83c59be318 100644
--- a/evap/static/css/evap.css
+++ b/evap/static/css/evap.css
@@ -294,12 +294,12 @@ Side notes for calling out things
/* Themes for different contexts */
.bs-callout-danger {
- background-color: #fcf2f2;
- border-color: #dFb5b4;
+ background-color: #fce4e4;
+ border-color: #eb595a;
}
.bs-callout-warning {
- background-color: #fefbed;
- border-color: #f1e7bc;
+ background-color: #fbf7d0;
+ border-color: #efe258;
}
.bs-callout-info {
background-color: #d9edf7;
diff --git a/evap/student/templates/student_vote.html b/evap/student/templates/student_vote.html
index 4e033e2df4..a419cfc351 100644
--- a/evap/student/templates/student_vote.html
+++ b/evap/student/templates/student_vote.html
@@ -18,6 +18,9 @@
{% if errors_exist %}
<div class="alert alert-danger" role="alert">{% blocktrans %}Please make sure to vote for all rating questions. You can also click on "I can't give feedback" to skip the questions about a single person.{% endblocktrans %}</div>
{% endif %}
+ {% if participants_warning %}
+ <div class="bs-callout bs-callout-warning">{% blocktrans %}This course has only a small number of participants. Please remember that your comments will be visible for the responsible person and the contributors you're evaluating. If two or more people evaluate the course, the results of all voting questions will also be published.{% endblocktrans %}</div>
+ {% endif %}
{% if preview %}
<small>
<div class="bs-callout bs-callout-info">
diff --git a/evap/student/views.py b/evap/student/views.py
index cd257d1933..9b253103ee 100644
--- a/evap/student/views.py
+++ b/evap/student/views.py
@@ -76,6 +76,7 @@ def vote(request, course_id):
course_form_group=course_form_group,
contributor_form_groups=contributor_form_groups,
course=course,
+ participants_warning=course.num_participants <= 5,
preview=False)
return render(request, "student_vote.html", template_data)
| Warning in courses with a small number of participants
In courses with 5 or fewer participants, a warning should be shown above the course's questionnaire:
_This course has only a small number of participants. Please remember that your comments will be visible for the responsible person and the contributors you're evaluating. If two or more people evaluate the course, the results of all voting questions will also be published._
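
A minimal sketch of the threshold logic (the helper and constant names below are illustrative, not part of EvaP; the actual fix inlines `course.num_participants <= 5` in the view):
```python
# Hypothetical helper mirroring the `num_participants <= 5` check from the fix;
# SMALL_COURSE_THRESHOLD is an illustrative name, not an EvaP setting.
SMALL_COURSE_THRESHOLD = 5

def needs_participants_warning(num_participants: int) -> bool:
    """Should the privacy warning be shown above the questionnaire?"""
    return num_participants <= SMALL_COURSE_THRESHOLD

assert needs_participants_warning(5) is True
assert needs_participants_warning(6) is False
```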
|
WordPress__openverse-api-723 | [
{
"content": "from drf_yasg import openapi\nfrom drf_yasg.inspectors import SwaggerAutoSchema\nfrom drf_yasg.utils import filter_none, force_real_str\n\n\nclass CustomAutoSchema(SwaggerAutoSchema):\n def get_operation(self, operation_keys=None):\n operation_keys = operation_keys or self.operation_keys\n\n consumes = self.get_consumes()\n produces = self.get_produces()\n\n body = self.get_request_body_parameters(consumes)\n query = self.get_query_parameters()\n parameters = body + query\n parameters = filter_none(parameters)\n parameters = self.add_manual_parameters(parameters)\n\n operation_id = self.get_operation_id(operation_keys)\n summary, description = self.get_summary_and_description()\n security = self.get_security()\n assert security is None or isinstance(\n security, list\n ), \"security must be a list of security requirement objects\"\n deprecated = self.is_deprecated()\n tags = self.get_tags(operation_keys)\n\n responses = self.get_responses()\n\n return openapi.Operation(\n operation_id=operation_id,\n description=force_real_str(description),\n summary=force_real_str(summary),\n responses=responses,\n parameters=parameters,\n consumes=consumes,\n produces=produces,\n tags=tags,\n security=security,\n deprecated=deprecated,\n **{\"x-code-samples\": self.overrides.get(\"code_examples\")}\n )\n",
"path": "api/catalog/custom_auto_schema.py"
}
] | [
{
"content": "from drf_yasg import openapi\nfrom drf_yasg.inspectors import SwaggerAutoSchema\nfrom drf_yasg.utils import filter_none, force_real_str\n\n\nclass CustomAutoSchema(SwaggerAutoSchema):\n def get_pagination_parameters(self):\n \"\"\"\n Since the pagination params are a part of the ``MediaSearchRequestSerializer``,\n they need not be added again as pagination params.\n \"\"\"\n\n return []\n\n def get_operation(self, operation_keys=None):\n operation_keys = operation_keys or self.operation_keys\n\n consumes = self.get_consumes()\n produces = self.get_produces()\n\n body = self.get_request_body_parameters(consumes)\n query = self.get_query_parameters()\n parameters = body + query\n parameters = filter_none(parameters)\n parameters = self.add_manual_parameters(parameters)\n\n operation_id = self.get_operation_id(operation_keys)\n summary, description = self.get_summary_and_description()\n security = self.get_security()\n assert security is None or isinstance(\n security, list\n ), \"security must be a list of security requirement objects\"\n deprecated = self.is_deprecated()\n tags = self.get_tags(operation_keys)\n\n responses = self.get_responses()\n\n return openapi.Operation(\n operation_id=operation_id,\n description=force_real_str(description),\n summary=force_real_str(summary),\n responses=responses,\n parameters=parameters,\n consumes=consumes,\n produces=produces,\n tags=tags,\n security=security,\n deprecated=deprecated,\n **{\"x-code-samples\": self.overrides.get(\"code_examples\")}\n )\n",
"path": "api/catalog/custom_auto_schema.py"
}
] | diff --git a/.github/workflows/ci_cd.yml b/.github/workflows/ci_cd.yml
index 5676ab79e..46d7d5f24 100644
--- a/.github/workflows/ci_cd.yml
+++ b/.github/workflows/ci_cd.yml
@@ -142,6 +142,10 @@ jobs:
- name: Start API, ingest and index test data
run: just init
+ - name: Smoke test ReDoc site
+ run: |
+ curl --fail 'http://localhost:8000/v1/?format=openapi'
+
- name: Run API tests
run: just api-test
diff --git a/api/catalog/custom_auto_schema.py b/api/catalog/custom_auto_schema.py
index 6e3b13b03..6cdbc94f4 100644
--- a/api/catalog/custom_auto_schema.py
+++ b/api/catalog/custom_auto_schema.py
@@ -4,6 +4,14 @@
class CustomAutoSchema(SwaggerAutoSchema):
+ def get_pagination_parameters(self):
+ """
+ Since the pagination params are a part of the ``MediaSearchRequestSerializer``,
+ they need not be added again as pagination params.
+ """
+
+ return []
+
def get_operation(self, operation_keys=None):
operation_keys = operation_keys or self.operation_keys
| Swagger/ReDoc page raises an error
## Description
While deploying [v2.5.2](https://github.com/WordPress/openverse-api/releases/tag/v2.5.2) to staging, we noticed that the API documentation page failed to render and caused this error:
```
[2022-05-25 17:02:32,253 - django.request - 241][ERROR] Internal Server Error: /v1/
Traceback (most recent call last):
File "/venv/lib/python3.10/site-packages/drf_yasg/openapi.py", line 110, in __getattr__
return self[make_swagger_name(item)]
KeyError: 'name'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/venv/lib/python3.10/site-packages/django/core/handlers/exception.py", line 55, in inner
response = get_response(request)
File "/venv/lib/python3.10/site-packages/django/core/handlers/base.py", line 197, in _get_response
response = wrapped_callback(request, *callback_args, **callback_kwargs)
File "/venv/lib/python3.10/site-packages/sentry_sdk/integrations/django/views.py", line 67, in sentry_wrapped_callback
return callback(request, *args, **kwargs)
File "/venv/lib/python3.10/site-packages/drf_yasg/views.py", line 34, in _wrapped_view_func
response = view_func(request, *args, **kwargs)
File "/venv/lib/python3.10/site-packages/django/utils/decorators.py", line 133, in _wrapped_view
response = view_func(request, *args, **kwargs)
File "/venv/lib/python3.10/site-packages/django/views/decorators/vary.py", line 21, in inner_func
response = func(*args, **kwargs)
File "/venv/lib/python3.10/site-packages/django/views/decorators/csrf.py", line 54, in wrapped_view
return view_func(*args, **kwargs)
File "/venv/lib/python3.10/site-packages/django/views/generic/base.py", line 84, in view
return self.dispatch(request, *args, **kwargs)
File "/venv/lib/python3.10/site-packages/rest_framework/views.py", line 509, in dispatch
response = self.handle_exception(exc)
File "/venv/lib/python3.10/site-packages/rest_framework/views.py", line 469, in handle_exception
self.raise_uncaught_exception(exc)
File "/venv/lib/python3.10/site-packages/rest_framework/views.py", line 480, in raise_uncaught_exception
raise exc
File "/venv/lib/python3.10/site-packages/rest_framework/views.py", line 506, in dispatch
response = handler(request, *args, **kwargs)
File "/venv/lib/python3.10/site-packages/drf_yasg/views.py", line 94, in get
schema = generator.get_schema(request, self.public)
File "/venv/lib/python3.10/site-packages/drf_yasg/generators.py", line 246, in get_schema
paths, prefix = self.get_paths(endpoints, components, request, public)
File "/venv/lib/python3.10/site-packages/drf_yasg/generators.py", line 404, in get_paths
operation = self.get_operation(view, path, prefix, method, components, request)
File "/venv/lib/python3.10/site-packages/drf_yasg/generators.py", line 446, in get_operation
operation = view_inspector.get_operation(operation_keys)
File "/api/catalog/custom_auto_schema.py", line 14, in get_operation
query = self.get_query_parameters()
File "/venv/lib/python3.10/site-packages/drf_yasg/inspectors/view.py", line 298, in get_query_parameters
if len(set(param_list_to_odict(natural_parameters)) & set(param_list_to_odict(serializer_parameters))) != 0:
File "/venv/lib/python3.10/site-packages/drf_yasg/utils.py", line 266, in param_list_to_odict
result = OrderedDict(((param.name, param.in_), param) for param in parameters)
File "/venv/lib/python3.10/site-packages/drf_yasg/utils.py", line 266, in <genexpr>
result = OrderedDict(((param.name, param.in_), param) for param in parameters)
File "/venv/lib/python3.10/site-packages/drf_yasg/openapi.py", line 113, in __getattr__
raise AttributeError("object of class " + type(self).__name__ + " has no attribute " + item)
AttributeError: object of class Parameter has no attribute name
```
Here's the error the page presents:
```
Something went wrong...
Error downloading http://localhost:8000/v1/?format=openapi HTTP ERROR 500
Stack trace
s/<@http://localhost:8000/static/drf-yasg/redoc/redoc.min.js:44:26651
read/</<@http://localhost:8000/static/drf-yasg/redoc/redoc.min.js:95:36080
s/<@http://localhost:8000/static/drf-yasg/redoc/redoc.min.js:44:26651
read/</<@http://localhost:8000/static/drf-yasg/redoc/redoc.min.js:95:35658
ReDoc Version: 2.0.0-rc.40
Commit: 17b9873
```
## Reproduction
1. `git checkout v2.5.2`
2. `just build`
3. `just recreate && just init`
4. Visit localhost:8000 and observe error
## Additional context
Sentry issue: https://sentry.io/share/issue/83044216200d47538f3733a16df46adc/
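For completeness, here is a rough Python analogue of the `curl --fail 'http://localhost:8000/v1/?format=openapi'` smoke test that the fix adds to CI (illustrative only; the URL and port come from the reproduction steps above, and this script is not part of the repository):
```python
# Illustrative smoke test: fetch the machine-readable schema and fail loudly
# on a non-2xx response, mirroring `curl --fail`.
import urllib.request

def smoke_test_schema(url="http://localhost:8000/v1/?format=openapi"):
    with urllib.request.urlopen(url) as resp:   # raises HTTPError on 4xx/5xx
        body = resp.read()
    if not body:
        raise RuntimeError("schema endpoint returned an empty response")
    print("schema OK, %d bytes" % len(body))

if __name__ == "__main__":
    smoke_test_schema()
```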
## Resolution
- [ ] 🙋 I would be interested in resolving this bug.
|
xorbitsai__inference-758 | [
{
"content": "# Copyright 2022-2023 XProbe Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport time\nimport uuid\nfrom typing import TYPE_CHECKING, AsyncGenerator, Dict, List, Optional, TypedDict, Union\n\nfrom ....constants import XINFERENCE_DISABLE_VLLM\nfrom ....types import (\n ChatCompletion,\n ChatCompletionChunk,\n ChatCompletionMessage,\n Completion,\n CompletionChoice,\n CompletionChunk,\n CompletionUsage,\n)\nfrom .. import LLM, LLMFamilyV1, LLMSpecV1\nfrom ..utils import ChatModelMixin\n\nlogger = logging.getLogger(__name__)\n\nif TYPE_CHECKING:\n from vllm.outputs import RequestOutput\n\n\nclass VLLMModelConfig(TypedDict, total=False):\n tokenizer_mode: Optional[str]\n trust_remote_code: bool\n tensor_parallel_size: int\n block_size: int\n swap_space: int # GiB\n gpu_memory_utilization: float\n max_num_batched_tokens: int\n max_num_seqs: int\n quantization: Optional[str]\n\n\nclass VLLMGenerateConfig(TypedDict, total=False):\n n: int\n best_of: Optional[int]\n presence_penalty: float\n frequency_penalty: float\n temperature: float\n top_p: float\n max_tokens: int\n stop_token_ids: Optional[List[int]]\n stop: Optional[Union[str, List[str]]]\n stream: bool # non-sampling param, should not be passed to the engine.\n\n\ntry:\n import vllm # noqa: F401\n\n VLLM_INSTALLED = True\nexcept ImportError:\n VLLM_INSTALLED = False\n\nVLLM_SUPPORTED_MODELS = [\"llama-2\", \"baichuan\", \"internlm-16k\", \"mistral-v0.1\"]\nVLLM_SUPPORTED_CHAT_MODELS = [\n \"llama-2-chat\",\n \"vicuna-v1.3\",\n \"vicuna-v1.5\",\n \"baichuan-chat\",\n \"internlm-chat-7b\",\n \"internlm-chat-8k\",\n \"internlm-chat-20b\",\n \"qwen-chat\",\n \"Yi\",\n \"Yi-chat\",\n \"code-llama\",\n \"code-llama-python\",\n \"code-llama-instruct\",\n \"mistral-instruct-v0.1\",\n \"chatglm3\",\n]\n\n\nclass VLLMModel(LLM):\n def __init__(\n self,\n model_uid: str,\n model_family: \"LLMFamilyV1\",\n model_spec: \"LLMSpecV1\",\n quantization: str,\n model_path: str,\n model_config: Optional[VLLMModelConfig],\n ):\n super().__init__(model_uid, model_family, model_spec, quantization, model_path)\n self._model_config = model_config\n self._engine = None\n\n def load(self):\n try:\n from vllm.engine.arg_utils import AsyncEngineArgs\n from vllm.engine.async_llm_engine import AsyncLLMEngine\n except ImportError:\n error_message = \"Failed to import module 'vllm'\"\n installation_guide = [\n \"Please make sure 'vllm' is installed. 
\",\n \"You can install it by `pip install vllm`\\n\",\n ]\n\n raise ImportError(f\"{error_message}\\n\\n{''.join(installation_guide)}\")\n\n self._model_config = self._sanitize_model_config(self._model_config)\n logger.info(\n f\"Loading {self.model_uid} with following model config: {self._model_config}\"\n )\n\n engine_args = AsyncEngineArgs(model=self.model_path, **self._model_config)\n self._engine = AsyncLLMEngine.from_engine_args(engine_args)\n\n def _sanitize_model_config(\n self, model_config: Optional[VLLMModelConfig]\n ) -> VLLMModelConfig:\n if model_config is None:\n model_config = VLLMModelConfig()\n\n cuda_count = self._get_cuda_count()\n\n model_config.setdefault(\"tokenizer_mode\", \"auto\")\n model_config.setdefault(\"trust_remote_code\", True)\n model_config.setdefault(\"tensor_parallel_size\", cuda_count)\n model_config.setdefault(\"block_size\", 16)\n model_config.setdefault(\"swap_space\", 4)\n model_config.setdefault(\"gpu_memory_utilization\", 0.90)\n model_config.setdefault(\"max_num_seqs\", 256)\n model_config.setdefault(\"quantization\", None)\n\n return model_config\n\n @staticmethod\n def _sanitize_generate_config(\n generate_config: Optional[Dict] = None,\n ) -> VLLMGenerateConfig:\n if not generate_config:\n generate_config = {}\n\n sanitized = VLLMGenerateConfig()\n sanitized.setdefault(\"n\", generate_config.get(\"n\", 1))\n sanitized.setdefault(\"best_of\", generate_config.get(\"best_of\", None))\n sanitized.setdefault(\n \"presence_penalty\", generate_config.get(\"presence_penalty\", 0.0)\n )\n sanitized.setdefault(\n \"frequency_penalty\", generate_config.get(\"frequency_penalty\", 0.0)\n )\n sanitized.setdefault(\"temperature\", generate_config.get(\"temperature\", 1.0))\n sanitized.setdefault(\"top_p\", generate_config.get(\"top_p\", 1.0))\n sanitized.setdefault(\"max_tokens\", generate_config.get(\"max_tokens\", 16))\n sanitized.setdefault(\"stop\", generate_config.get(\"stop\", None))\n sanitized.setdefault(\n \"stop_token_ids\", generate_config.get(\"stop_token_ids\", None)\n )\n sanitized.setdefault(\"stream\", generate_config.get(\"stream\", None))\n\n return sanitized\n\n @classmethod\n def match(\n cls, llm_family: \"LLMFamilyV1\", llm_spec: \"LLMSpecV1\", quantization: str\n ) -> bool:\n if XINFERENCE_DISABLE_VLLM:\n return False\n if not cls._has_cuda_device():\n return False\n if not cls._is_linux():\n return False\n if quantization != \"none\":\n return False\n if llm_spec.model_format != \"pytorch\":\n return False\n if llm_family.model_name not in VLLM_SUPPORTED_MODELS:\n return False\n if \"generate\" not in llm_family.model_ability:\n return False\n return VLLM_INSTALLED\n\n @staticmethod\n def _convert_request_output_to_completion_chunk(\n request_id: str, model: str, request_output: \"RequestOutput\"\n ) -> CompletionChunk:\n choices: List[CompletionChoice] = []\n for output in request_output.outputs:\n choices.append(\n CompletionChoice(\n text=output.text,\n index=output.index,\n logprobs=None, # TODO: support logprobs.\n finish_reason=output.finish_reason,\n )\n )\n return CompletionChunk(\n id=request_id,\n object=\"text_completion\",\n created=int(time.time()),\n model=model,\n choices=choices,\n )\n\n @staticmethod\n def _convert_request_output_to_completion(\n request_id: str, model: str, request_output: \"RequestOutput\"\n ) -> Completion:\n choices = []\n for output in request_output.outputs:\n choices.append(\n CompletionChoice(\n text=output.text,\n index=output.index,\n logprobs=None, # TODO: support logprobs.\n 
finish_reason=output.finish_reason,\n )\n )\n\n prompt_tokens = len(request_output.prompt_token_ids)\n completion_tokens = sum(\n len(output.token_ids) for output in request_output.outputs\n )\n usage = CompletionUsage(\n prompt_tokens=prompt_tokens,\n completion_tokens=completion_tokens,\n total_tokens=prompt_tokens + completion_tokens,\n )\n return Completion(\n id=request_id,\n object=\"text_completion\",\n created=int(time.time()),\n model=model,\n choices=choices,\n usage=usage,\n )\n\n async def async_generate(\n self,\n prompt: str,\n generate_config: Optional[Dict] = None,\n ) -> Union[Completion, AsyncGenerator[CompletionChunk, None]]:\n try:\n from vllm.sampling_params import SamplingParams\n except ImportError:\n error_message = \"Failed to import module 'vllm'\"\n installation_guide = [\n \"Please make sure 'vllm' is installed. \",\n \"You can install it by `pip install vllm`\\n\",\n ]\n\n raise ImportError(f\"{error_message}\\n\\n{''.join(installation_guide)}\")\n\n sanitized_generate_config = self._sanitize_generate_config(generate_config)\n logger.debug(\n \"Enter generate, prompt: %s, generate config: %s\", prompt, generate_config\n )\n\n stream = sanitized_generate_config.pop(\"stream\")\n sampling_params = SamplingParams(**sanitized_generate_config)\n request_id = str(uuid.uuid1())\n\n assert self._engine is not None\n results_generator = self._engine.generate(prompt, sampling_params, request_id)\n\n async def stream_results() -> AsyncGenerator[CompletionChunk, None]:\n previous_texts = [\"\"] * sanitized_generate_config[\"n\"]\n async for _request_output in results_generator:\n chunk = self._convert_request_output_to_completion_chunk(\n request_id=request_id,\n model=self.model_uid,\n request_output=_request_output,\n )\n for i, choice in enumerate(chunk[\"choices\"]):\n delta = choice[\"text\"][len(previous_texts[i]) :]\n previous_texts[i] = choice[\"text\"]\n choice[\"text\"] = delta\n yield chunk\n\n if stream:\n return stream_results()\n else:\n final_output = None\n async for request_output in results_generator:\n final_output = request_output\n\n assert final_output is not None\n return self._convert_request_output_to_completion(\n request_id, model=self.model_uid, request_output=final_output\n )\n\n\nclass VLLMChatModel(VLLMModel, ChatModelMixin):\n @classmethod\n def match(\n cls, llm_family: \"LLMFamilyV1\", llm_spec: \"LLMSpecV1\", quantization: str\n ) -> bool:\n if XINFERENCE_DISABLE_VLLM:\n return False\n if quantization != \"none\":\n return False\n if llm_spec.model_format != \"pytorch\":\n return False\n if llm_family.model_name not in VLLM_SUPPORTED_CHAT_MODELS:\n return False\n if \"chat\" not in llm_family.model_ability:\n return False\n return VLLM_INSTALLED\n\n def _sanitize_chat_config(\n self,\n generate_config: Optional[Dict] = None,\n ) -> Dict:\n if not generate_config:\n generate_config = {}\n if self.model_family.prompt_style:\n if (\n not generate_config.get(\"stop\")\n ) and self.model_family.prompt_style.stop:\n generate_config[\"stop\"] = self.model_family.prompt_style.stop.copy()\n if self.model_family.prompt_style.stop_token_ids:\n generate_config.setdefault(\n \"stop_token_ids\",\n self.model_family.prompt_style.stop_token_ids.copy(),\n )\n return generate_config\n\n async def async_chat(\n self,\n prompt: str,\n system_prompt: Optional[str] = None,\n chat_history: Optional[List[ChatCompletionMessage]] = None,\n generate_config: Optional[Dict] = None,\n ) -> Union[ChatCompletion, AsyncGenerator[ChatCompletionChunk, None]]:\n assert 
self.model_family.prompt_style is not None\n prompt_style = self.model_family.prompt_style.copy()\n if system_prompt:\n prompt_style.system_prompt = system_prompt\n chat_history = chat_history or []\n full_prompt = self.get_prompt(prompt, chat_history, prompt_style)\n\n sanitized = self._sanitize_chat_config(generate_config)\n stream = sanitized[\"stream\"]\n\n if stream:\n agen = await self.async_generate(full_prompt, sanitized)\n assert isinstance(agen, AsyncGenerator)\n return self._async_to_chat_completion_chunks(agen)\n else:\n c = await self.async_generate(full_prompt, sanitized)\n assert not isinstance(c, AsyncGenerator)\n return self._to_chat_completion(c)\n",
"path": "xinference/model/llm/vllm/core.py"
}
] | [
{
"content": "# Copyright 2022-2023 XProbe Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport time\nimport uuid\nfrom typing import TYPE_CHECKING, AsyncGenerator, Dict, List, Optional, TypedDict, Union\n\nfrom ....constants import XINFERENCE_DISABLE_VLLM\nfrom ....types import (\n ChatCompletion,\n ChatCompletionChunk,\n ChatCompletionMessage,\n Completion,\n CompletionChoice,\n CompletionChunk,\n CompletionUsage,\n)\nfrom .. import LLM, LLMFamilyV1, LLMSpecV1\nfrom ..utils import ChatModelMixin\n\nlogger = logging.getLogger(__name__)\n\nif TYPE_CHECKING:\n from vllm.outputs import RequestOutput\n\n\nclass VLLMModelConfig(TypedDict, total=False):\n tokenizer_mode: Optional[str]\n trust_remote_code: bool\n tensor_parallel_size: int\n block_size: int\n swap_space: int # GiB\n gpu_memory_utilization: float\n max_num_batched_tokens: int\n max_num_seqs: int\n quantization: Optional[str]\n\n\nclass VLLMGenerateConfig(TypedDict, total=False):\n n: int\n best_of: Optional[int]\n presence_penalty: float\n frequency_penalty: float\n temperature: float\n top_p: float\n max_tokens: int\n stop_token_ids: Optional[List[int]]\n stop: Optional[Union[str, List[str]]]\n stream: bool # non-sampling param, should not be passed to the engine.\n\n\ntry:\n import vllm # noqa: F401\n\n VLLM_INSTALLED = True\nexcept ImportError:\n VLLM_INSTALLED = False\n\nVLLM_SUPPORTED_MODELS = [\"llama-2\", \"baichuan\", \"internlm-16k\", \"mistral-v0.1\"]\nVLLM_SUPPORTED_CHAT_MODELS = [\n \"llama-2-chat\",\n \"vicuna-v1.3\",\n \"vicuna-v1.5\",\n \"baichuan-chat\",\n \"internlm-chat-7b\",\n \"internlm-chat-8k\",\n \"internlm-chat-20b\",\n \"qwen-chat\",\n \"Yi\",\n \"Yi-chat\",\n \"code-llama\",\n \"code-llama-python\",\n \"code-llama-instruct\",\n \"mistral-instruct-v0.1\",\n \"chatglm3\",\n]\n\n\nclass VLLMModel(LLM):\n def __init__(\n self,\n model_uid: str,\n model_family: \"LLMFamilyV1\",\n model_spec: \"LLMSpecV1\",\n quantization: str,\n model_path: str,\n model_config: Optional[VLLMModelConfig],\n ):\n super().__init__(model_uid, model_family, model_spec, quantization, model_path)\n self._model_config = model_config\n self._engine = None\n\n def load(self):\n try:\n from vllm.engine.arg_utils import AsyncEngineArgs\n from vllm.engine.async_llm_engine import AsyncLLMEngine\n except ImportError:\n error_message = \"Failed to import module 'vllm'\"\n installation_guide = [\n \"Please make sure 'vllm' is installed. 
\",\n \"You can install it by `pip install vllm`\\n\",\n ]\n\n raise ImportError(f\"{error_message}\\n\\n{''.join(installation_guide)}\")\n\n self._model_config = self._sanitize_model_config(self._model_config)\n logger.info(\n f\"Loading {self.model_uid} with following model config: {self._model_config}\"\n )\n\n engine_args = AsyncEngineArgs(model=self.model_path, **self._model_config)\n self._engine = AsyncLLMEngine.from_engine_args(engine_args)\n\n def _sanitize_model_config(\n self, model_config: Optional[VLLMModelConfig]\n ) -> VLLMModelConfig:\n if model_config is None:\n model_config = VLLMModelConfig()\n\n cuda_count = self._get_cuda_count()\n\n model_config.setdefault(\"tokenizer_mode\", \"auto\")\n model_config.setdefault(\"trust_remote_code\", True)\n model_config.setdefault(\"tensor_parallel_size\", cuda_count)\n model_config.setdefault(\"block_size\", 16)\n model_config.setdefault(\"swap_space\", 4)\n model_config.setdefault(\"gpu_memory_utilization\", 0.90)\n model_config.setdefault(\"max_num_seqs\", 256)\n model_config.setdefault(\"quantization\", None)\n\n return model_config\n\n @staticmethod\n def _sanitize_generate_config(\n generate_config: Optional[Dict] = None,\n ) -> VLLMGenerateConfig:\n if not generate_config:\n generate_config = {}\n\n sanitized = VLLMGenerateConfig()\n sanitized.setdefault(\"n\", generate_config.get(\"n\", 1))\n sanitized.setdefault(\"best_of\", generate_config.get(\"best_of\", None))\n sanitized.setdefault(\n \"presence_penalty\", generate_config.get(\"presence_penalty\", 0.0)\n )\n sanitized.setdefault(\n \"frequency_penalty\", generate_config.get(\"frequency_penalty\", 0.0)\n )\n sanitized.setdefault(\"temperature\", generate_config.get(\"temperature\", 1.0))\n sanitized.setdefault(\"top_p\", generate_config.get(\"top_p\", 1.0))\n sanitized.setdefault(\"max_tokens\", generate_config.get(\"max_tokens\", 16))\n sanitized.setdefault(\"stop\", generate_config.get(\"stop\", None))\n sanitized.setdefault(\n \"stop_token_ids\", generate_config.get(\"stop_token_ids\", None)\n )\n sanitized.setdefault(\"stream\", generate_config.get(\"stream\", None))\n\n return sanitized\n\n @classmethod\n def match(\n cls, llm_family: \"LLMFamilyV1\", llm_spec: \"LLMSpecV1\", quantization: str\n ) -> bool:\n if XINFERENCE_DISABLE_VLLM:\n return False\n if not cls._has_cuda_device():\n return False\n if not cls._is_linux():\n return False\n if quantization != \"none\":\n return False\n if llm_spec.model_format != \"pytorch\":\n return False\n if llm_family.model_name not in VLLM_SUPPORTED_MODELS:\n return False\n if \"generate\" not in llm_family.model_ability:\n return False\n return VLLM_INSTALLED\n\n @staticmethod\n def _convert_request_output_to_completion_chunk(\n request_id: str, model: str, request_output: \"RequestOutput\"\n ) -> CompletionChunk:\n choices: List[CompletionChoice] = []\n for output in request_output.outputs:\n choices.append(\n CompletionChoice(\n text=output.text,\n index=output.index,\n logprobs=None, # TODO: support logprobs.\n finish_reason=output.finish_reason,\n )\n )\n return CompletionChunk(\n id=request_id,\n object=\"text_completion\",\n created=int(time.time()),\n model=model,\n choices=choices,\n )\n\n @staticmethod\n def _convert_request_output_to_completion(\n request_id: str, model: str, request_output: \"RequestOutput\"\n ) -> Completion:\n choices = []\n for output in request_output.outputs:\n choices.append(\n CompletionChoice(\n text=output.text,\n index=output.index,\n logprobs=None, # TODO: support logprobs.\n 
finish_reason=output.finish_reason,\n )\n )\n\n prompt_tokens = len(request_output.prompt_token_ids)\n completion_tokens = sum(\n len(output.token_ids) for output in request_output.outputs\n )\n usage = CompletionUsage(\n prompt_tokens=prompt_tokens,\n completion_tokens=completion_tokens,\n total_tokens=prompt_tokens + completion_tokens,\n )\n return Completion(\n id=request_id,\n object=\"text_completion\",\n created=int(time.time()),\n model=model,\n choices=choices,\n usage=usage,\n )\n\n async def async_generate(\n self,\n prompt: str,\n generate_config: Optional[Dict] = None,\n ) -> Union[Completion, AsyncGenerator[CompletionChunk, None]]:\n try:\n from vllm.sampling_params import SamplingParams\n except ImportError:\n error_message = \"Failed to import module 'vllm'\"\n installation_guide = [\n \"Please make sure 'vllm' is installed. \",\n \"You can install it by `pip install vllm`\\n\",\n ]\n\n raise ImportError(f\"{error_message}\\n\\n{''.join(installation_guide)}\")\n\n sanitized_generate_config = self._sanitize_generate_config(generate_config)\n logger.debug(\n \"Enter generate, prompt: %s, generate config: %s\", prompt, generate_config\n )\n\n stream = sanitized_generate_config.pop(\"stream\")\n sampling_params = SamplingParams(**sanitized_generate_config)\n request_id = str(uuid.uuid1())\n\n assert self._engine is not None\n results_generator = self._engine.generate(prompt, sampling_params, request_id)\n\n async def stream_results() -> AsyncGenerator[CompletionChunk, None]:\n previous_texts = [\"\"] * sanitized_generate_config[\"n\"]\n async for _request_output in results_generator:\n chunk = self._convert_request_output_to_completion_chunk(\n request_id=request_id,\n model=self.model_uid,\n request_output=_request_output,\n )\n for i, choice in enumerate(chunk[\"choices\"]):\n delta = choice[\"text\"][len(previous_texts[i]) :]\n previous_texts[i] = choice[\"text\"]\n choice[\"text\"] = delta\n yield chunk\n\n if stream:\n return stream_results()\n else:\n final_output = None\n async for request_output in results_generator:\n final_output = request_output\n\n assert final_output is not None\n return self._convert_request_output_to_completion(\n request_id, model=self.model_uid, request_output=final_output\n )\n\n\nclass VLLMChatModel(VLLMModel, ChatModelMixin):\n @classmethod\n def match(\n cls, llm_family: \"LLMFamilyV1\", llm_spec: \"LLMSpecV1\", quantization: str\n ) -> bool:\n if XINFERENCE_DISABLE_VLLM:\n return False\n if quantization != \"none\":\n return False\n if llm_spec.model_format != \"pytorch\":\n return False\n if llm_family.model_name not in VLLM_SUPPORTED_CHAT_MODELS:\n return False\n if \"chat\" not in llm_family.model_ability:\n return False\n return VLLM_INSTALLED\n\n def _sanitize_chat_config(\n self,\n generate_config: Optional[Dict] = None,\n ) -> Dict:\n if not generate_config:\n generate_config = {}\n if self.model_family.prompt_style:\n if (\n not generate_config.get(\"stop\")\n ) and self.model_family.prompt_style.stop:\n generate_config[\"stop\"] = self.model_family.prompt_style.stop.copy()\n if self.model_family.prompt_style.stop_token_ids:\n generate_config.setdefault(\n \"stop_token_ids\",\n self.model_family.prompt_style.stop_token_ids.copy(),\n )\n return generate_config\n\n async def async_chat(\n self,\n prompt: str,\n system_prompt: Optional[str] = None,\n chat_history: Optional[List[ChatCompletionMessage]] = None,\n generate_config: Optional[Dict] = None,\n ) -> Union[ChatCompletion, AsyncGenerator[ChatCompletionChunk, None]]:\n assert 
self.model_family.prompt_style is not None\n prompt_style = self.model_family.prompt_style.copy()\n if system_prompt:\n prompt_style.system_prompt = system_prompt\n chat_history = chat_history or []\n full_prompt = self.get_prompt(prompt, chat_history, prompt_style)\n\n sanitized = self._sanitize_chat_config(generate_config)\n stream = sanitized.get(\"stream\", None)\n\n if stream:\n agen = await self.async_generate(full_prompt, sanitized)\n assert isinstance(agen, AsyncGenerator)\n return self._async_to_chat_completion_chunks(agen)\n else:\n c = await self.async_generate(full_prompt, sanitized)\n assert not isinstance(c, AsyncGenerator)\n return self._to_chat_completion(c)\n",
"path": "xinference/model/llm/vllm/core.py"
}
] | diff --git a/xinference/model/llm/vllm/core.py b/xinference/model/llm/vllm/core.py
index ad5b9e8297..9cdfaa94b7 100644
--- a/xinference/model/llm/vllm/core.py
+++ b/xinference/model/llm/vllm/core.py
@@ -349,7 +349,7 @@ async def async_chat(
full_prompt = self.get_prompt(prompt, chat_history, prompt_style)
sanitized = self._sanitize_chat_config(generate_config)
- stream = sanitized["stream"]
+ stream = sanitized.get("stream", None)
if stream:
agen = await self.async_generate(full_prompt, sanitized)
| BUG: vLLM chat raises KeyError: 'stream'
### Describe the bug
KeyError: [address=172.22.227.26:33767, pid=20969] 'stream'
### To Reproduce
To help us to reproduce this bug, please provide information below:
```
Traceback (most recent call last):
File "/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xinference/api/restful_api.py", line 824, in create_chat_completion
data = await model.chat(prompt, system_prompt, chat_history, kwargs)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xoscar/backends/context.py", line 227, in send
return self._process_result_message(result)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xoscar/backends/context.py", line 102, in _process_result_message
raise message.as_instanceof_cause()
File "/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xoscar/backends/pool.py", line 657, in send
result = await self._run_coro(message.message_id, coro)
^^^^^^^^^^^^^^^^^
File "/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xoscar/backends/pool.py", line 368, in _run_coro
return await coro
File "/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xoscar/api.py", line 306, in __on_receive__
return await super().__on_receive__(message) # type: ignore
^^^^^^^^^^^^^^^^^
File "xoscar/core.pyx", line 558, in __on_receive__
raise ex
File "xoscar/core.pyx", line 520, in xoscar.core._BaseActor.__on_receive__
async with self._lock:
^^^^^^^^^^^^^^^^^
File "xoscar/core.pyx", line 521, in xoscar.core._BaseActor.__on_receive__
with debug_async_timeout('actor_lock_timeout',
^^^^^^^^^^^^^^^^^
File "xoscar/core.pyx", line 526, in xoscar.core._BaseActor.__on_receive__
result = await result
^^^^^^^^^^^^^^^^^
File "/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xinference/core/utils.py", line 33, in wrapped
ret = await func(*args, **kwargs)
^^^^^^^^^^^^^^^^^
File "/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xinference/core/model.py", line 77, in wrapped_func
ret = await fn(self, *args, **kwargs)
^^^^^^^^^^^^^^^^^
File "/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xinference/core/model.py", line 272, in chat
return await self._call_async_wrapper(_async_wrapper)
^^^^^^^^^^^^^^^^^
File "/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xinference/core/model.py", line 223, in _call_async_wrapper
return await asyncio.create_task(_wrapper())
^^^^^^^^^^^^^^^^^
File "/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xinference/core/model.py", line 268, in _async_wrapper
await getattr(self._model, "async_chat")(prompt, *args, **kwargs)
^^^^^^^^^^^^^^^^^
File "/home/jingtianran/anaconda3/envs/xinference/lib/python3.11/site-packages/xinference/model/llm/vllm/core.py", line 348, in async_chat
stream = sanitized["stream"]
^^^^^^^^^^^^^^^^^
KeyError: [address=172.22.227.26:33767, pid=20969] 'stream'
```
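The failure is easy to reproduce without loading a model: `_sanitize_chat_config()` only fills in `stop`/`stop_token_ids`, so a chat request that never sets `stream` produces a config dict with no `"stream"` key. A plain-dict sketch (the dict stands in for the sanitizer output; only the `.get()` line reflects the actual patch):
```python
# Plain-dict sketch of the bug: the chat-path sanitizer never guarantees a
# "stream" key, so subscripting raises while .get() falls back to None.
generate_config = {"max_tokens": 512}        # a chat request without "stream"
sanitized = dict(generate_config)            # stand-in for _sanitize_chat_config()

try:
    stream = sanitized["stream"]             # pre-fix code path: KeyError
except KeyError as exc:
    print("old code raises KeyError:", exc)

stream = sanitized.get("stream", None)       # patched code path
print("patched code gets:", stream)          # -> None, non-streaming branch
```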
|
GeotrekCE__Geotrek-admin-2644 | [
{
"content": "import os\nfrom datetime import datetime\n\nfrom django.db.models import Q, Min, Max\nfrom django.db.models.functions import ExtractYear\nfrom django.conf import settings\nfrom django.utils.translation import gettext_lazy as _\nfrom django.contrib.contenttypes.fields import GenericForeignKey\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.gis.db import models\nfrom django.contrib.gis.geos import GeometryCollection\n\nfrom mapentity.models import MapEntityMixin\n\nfrom geotrek.authent.models import StructureRelated, StructureOrNoneRelated\nfrom geotrek.altimetry.models import AltimetryMixin\nfrom geotrek.core.models import Topology, Path, Trail\nfrom geotrek.common.models import Organism\nfrom geotrek.common.mixins import TimeStampedModelMixin, NoDeleteMixin, AddPropertyMixin, NoDeleteManager\nfrom geotrek.common.utils import classproperty\nfrom geotrek.infrastructure.models import Infrastructure\nfrom geotrek.signage.models import Signage\nfrom geotrek.zoning.mixins import ZoningPropertiesMixin\n\nif 'geotrek.signage' in settings.INSTALLED_APPS:\n from geotrek.signage.models import Blade\n\n\nclass InterventionManager(NoDeleteManager):\n def year_choices(self):\n return self.existing().filter(date__isnull=False).annotate(year=ExtractYear('date')) \\\n .order_by('-year').distinct().values_list('year', 'year')\n\n\nclass Intervention(ZoningPropertiesMixin, AddPropertyMixin, MapEntityMixin, AltimetryMixin,\n TimeStampedModelMixin, StructureRelated, NoDeleteMixin):\n\n target_type = models.ForeignKey(ContentType, null=True, on_delete=models.CASCADE)\n target_id = models.PositiveIntegerField(blank=True, null=True)\n target = GenericForeignKey('target_type', 'target_id')\n\n name = models.CharField(verbose_name=_(\"Name\"), max_length=128, help_text=_(\"Brief summary\"))\n date = models.DateField(default=datetime.now, verbose_name=_(\"Date\"), help_text=_(\"When ?\"))\n subcontracting = models.BooleanField(verbose_name=_(\"Subcontracting\"), default=False)\n\n # Technical information\n width = models.FloatField(default=0.0, blank=True, null=True, verbose_name=_(\"Width\"))\n height = models.FloatField(default=0.0, blank=True, null=True, verbose_name=_(\"Height\"))\n area = models.FloatField(editable=False, default=0, blank=True, null=True, verbose_name=_(\"Area\"))\n\n # Costs\n material_cost = models.FloatField(default=0.0, blank=True, null=True, verbose_name=_(\"Material cost\"))\n heliport_cost = models.FloatField(default=0.0, blank=True, null=True, verbose_name=_(\"Heliport cost\"))\n subcontract_cost = models.FloatField(default=0.0, blank=True, null=True, verbose_name=_(\"Subcontract cost\"))\n\n # AltimetyMixin for denormalized fields from related topology, updated via trigger.\n length = models.FloatField(editable=True, default=0.0, null=True, blank=True, verbose_name=_(\"3D Length\"))\n\n stake = models.ForeignKey('core.Stake', null=True, blank=True, on_delete=models.CASCADE,\n related_name='interventions', verbose_name=_(\"Stake\"))\n\n status = models.ForeignKey('InterventionStatus', verbose_name=_(\"Status\"), on_delete=models.CASCADE)\n\n type = models.ForeignKey('InterventionType', null=True, blank=True, on_delete=models.CASCADE,\n verbose_name=_(\"Type\"))\n\n disorders = models.ManyToManyField('InterventionDisorder', related_name=\"interventions\",\n verbose_name=_(\"Disorders\"), blank=True)\n\n jobs = models.ManyToManyField('InterventionJob', through='ManDay', verbose_name=_(\"Jobs\"))\n\n project = models.ForeignKey('Project', 
null=True, blank=True, related_name=\"interventions\",\n on_delete=models.CASCADE, verbose_name=_(\"Project\"))\n description = models.TextField(blank=True, verbose_name=_(\"Description\"), help_text=_(\"Remarks and notes\"))\n\n eid = models.CharField(verbose_name=_(\"External id\"), max_length=1024, blank=True, null=True)\n\n objects = InterventionManager()\n\n class Meta:\n verbose_name = _(\"Intervention\")\n verbose_name_plural = _(\"Interventions\")\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._geom = None\n\n def default_stake(self):\n stake = None\n if self.target and isinstance(self.target, Topology):\n for path in self.target.paths.exclude(stake=None):\n if path.stake > stake:\n stake = path.stake\n return stake\n\n def reload(self):\n if self.pk:\n fromdb = self.__class__.objects.get(pk=self.pk)\n self.area = fromdb.area\n AltimetryMixin.reload(self, fromdb)\n TimeStampedModelMixin.reload(self, fromdb)\n NoDeleteMixin.reload(self, fromdb)\n if isinstance(self.target, Topology):\n self.target.reload()\n return self\n\n def save(self, *args, **kwargs):\n if self.stake is None:\n self.stake = self.default_stake()\n\n super().save(*args, **kwargs)\n\n # Invalidate project map\n if self.project:\n try:\n os.remove(self.project.get_map_image_path())\n except OSError:\n pass\n\n self.reload()\n\n @classproperty\n def target_verbose_name(cls):\n return _(\"On\")\n\n @property\n def target_display(self):\n icon = 'path'\n title = _('Paths')\n if not self.target._meta.model_name == \"topology\":\n icon = self.target._meta.model_name\n\n title = self.target.name_display\n return '<img src=\"%simages/%s-16.png\"> %s' % (settings.STATIC_URL,\n icon,\n title)\n\n @property\n def target_csv_display(self):\n return \"%s: %s (%s)\" % (\n _(self.target._meta.verbose_name),\n self.target,\n self.target.pk)\n\n @property\n def in_project(self):\n return self.project is not None\n\n @property\n def paths(self):\n if self.target._meta.model_name == 'blade':\n return self.target.signage.paths.all()\n if self.target:\n return self.target.paths.all()\n return Path.objects.none()\n\n @property\n def total_manday(self):\n total = 0.0\n for md in self.manday_set.all():\n total += float(md.nb_days)\n return total\n\n @classproperty\n def total_manday_verbose_name(cls):\n return _(\"Mandays\")\n\n @property\n def total_cost_mandays(self):\n total = 0.0\n for md in self.manday_set.all():\n total += md.cost\n return total\n\n @classproperty\n def total_cost_mandays_verbose_name(cls):\n return _(\"Mandays cost\")\n\n @property\n def total_cost(self):\n return self.total_cost_mandays + \\\n self.material_cost or 0 + \\\n self.heliport_cost or 0 + \\\n self.subcontract_cost or 0\n\n @classproperty\n def total_cost_verbose_name(cls):\n return _(\"Total cost\")\n\n @classproperty\n def geomfield(cls):\n return Topology._meta.get_field('geom')\n\n @property\n def geom(self):\n if self._geom is None:\n if self.target:\n self._geom = self.target.geom\n return self._geom\n\n @geom.setter\n def geom(self, value):\n self._geom = value\n\n @property\n def api_geom(self):\n if not self.geom:\n return None\n return self.geom.transform(settings.API_SRID, clone=True)\n\n @property\n def name_display(self):\n return '<a data-pk=\"%s\" href=\"%s\" title=\"%s\" >%s</a>' % (self.pk,\n self.get_detail_url(),\n self.name,\n self.name)\n\n @property\n def name_csv_display(self):\n return self.name\n\n def __str__(self):\n return \"%s (%s)\" % (self.name, self.date)\n\n @classmethod\n def 
get_interventions(cls, obj):\n blade_content_type = ContentType.objects.get_for_model(Blade)\n non_topology_content_types = [blade_content_type]\n if 'geotrek.outdoor' in settings.INSTALLED_APPS:\n non_topology_content_types += [\n ContentType.objects.get_by_natural_key('outdoor', 'site'),\n ContentType.objects.get_by_natural_key('outdoor', 'course'),\n ]\n if settings.TREKKING_TOPOLOGY_ENABLED:\n topologies = list(Topology.overlapping(obj).values_list('pk', flat=True))\n else:\n area = obj.geom.buffer(settings.INTERVENTION_INTERSECTION_MARGIN)\n topologies = list(Topology.objects.existing().filter(geom__intersects=area).values_list('pk', flat=True))\n qs = Q(target_id__in=topologies) & ~Q(target_type__in=non_topology_content_types)\n if 'geotrek.signage' in settings.INSTALLED_APPS:\n blades = list(Blade.objects.filter(signage__in=topologies).values_list('id', flat=True))\n qs |= Q(target_id__in=blades, target_type=blade_content_type)\n return Intervention.objects.existing().filter(qs).distinct('pk')\n\n @classmethod\n def path_interventions(cls, path):\n blade_content_type = ContentType.objects.get_for_model(Blade)\n non_topology_content_types = [blade_content_type]\n if 'geotrek.outdoor' in settings.INSTALLED_APPS:\n non_topology_content_types += [\n ContentType.objects.get_by_natural_key('outdoor', 'site'),\n ContentType.objects.get_by_natural_key('outdoor', 'course'),\n ]\n topologies = list(Topology.objects.filter(aggregations__path=path).values_list('pk', flat=True))\n qs = Q(target_id__in=topologies) & ~Q(target_type__in=non_topology_content_types)\n if 'geotrek.signage' in settings.INSTALLED_APPS:\n blades = list(Blade.objects.filter(signage__in=topologies).values_list('id', flat=True))\n qs |= Q(target_id__in=blades, target_type=blade_content_type)\n return Intervention.objects.existing().filter(qs).distinct('pk')\n\n @classmethod\n def topology_interventions(cls, topology):\n return cls.get_interventions(topology)\n\n @classmethod\n def blade_interventions(cls, blade):\n return cls.get_interventions(blade.signage)\n\n @property\n def signages(self):\n if self.target_type == ContentType.objects.get_for_model(Signage):\n return [self.target]\n return []\n\n @property\n def infrastructures(self):\n if self.target_type == ContentType.objects.get_for_model(Infrastructure):\n return [self.target]\n return []\n\n def distance(self, to_cls):\n \"\"\"Distance to associate this intervention to another class\"\"\"\n return settings.MAINTENANCE_INTERSECTION_MARGIN\n\n\nPath.add_property('interventions', lambda self: Intervention.path_interventions(self), _(\"Interventions\"))\nTopology.add_property('interventions', lambda self: Intervention.topology_interventions(self), _(\"Interventions\"))\nif 'geotrek.signage' in settings.INSTALLED_APPS:\n Blade.add_property('interventions', lambda self: Intervention.blade_interventions(self), _(\"Interventions\"))\n\n\nclass InterventionStatus(StructureOrNoneRelated):\n\n status = models.CharField(verbose_name=_(\"Status\"), max_length=128)\n order = models.PositiveSmallIntegerField(default=None, null=True, blank=True, verbose_name=_(\"Display order\"))\n\n class Meta:\n verbose_name = _(\"Intervention's status\")\n verbose_name_plural = _(\"Intervention's statuses\")\n ordering = ['order', 'status']\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.status, self.structure.name)\n return self.status\n\n\nclass InterventionType(StructureOrNoneRelated):\n\n type = models.CharField(max_length=128, verbose_name=_(\"Type\"))\n\n 
class Meta:\n verbose_name = _(\"Intervention's type\")\n verbose_name_plural = _(\"Intervention's types\")\n ordering = ['type']\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.type, self.structure.name)\n return self.type\n\n\nclass InterventionDisorder(StructureOrNoneRelated):\n\n disorder = models.CharField(max_length=128, verbose_name=_(\"Disorder\"))\n\n class Meta:\n verbose_name = _(\"Intervention's disorder\")\n verbose_name_plural = _(\"Intervention's disorders\")\n ordering = ['disorder']\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.disorder, self.structure.name)\n return self.disorder\n\n\nclass InterventionJob(StructureOrNoneRelated):\n\n job = models.CharField(max_length=128, verbose_name=_(\"Job\"))\n cost = models.DecimalField(verbose_name=_(\"Cost\"), default=1.0, decimal_places=2, max_digits=8)\n\n class Meta:\n verbose_name = _(\"Intervention's job\")\n verbose_name_plural = _(\"Intervention's jobs\")\n ordering = ['job']\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.job, self.structure.name)\n return self.job\n\n\nclass ManDay(models.Model):\n\n nb_days = models.DecimalField(verbose_name=_(\"Mandays\"), decimal_places=2, max_digits=6)\n intervention = models.ForeignKey(Intervention, on_delete=models.CASCADE)\n job = models.ForeignKey(InterventionJob, verbose_name=_(\"Job\"), on_delete=models.CASCADE)\n\n class Meta:\n verbose_name = _(\"Manday\")\n verbose_name_plural = _(\"Mandays\")\n\n @property\n def cost(self):\n return float(self.nb_days * self.job.cost)\n\n def __str__(self):\n return str(self.nb_days)\n\n\nclass ProjectManager(NoDeleteManager):\n def year_choices(self):\n bounds = self.existing().aggregate(min=Min('begin_year'), max=Max('end_year'))\n if not bounds['min'] or not bounds['max']:\n return []\n return [(year, year) for year in range(bounds['min'], bounds['max'] + 1)]\n\n\nclass Project(ZoningPropertiesMixin, AddPropertyMixin, MapEntityMixin, TimeStampedModelMixin,\n StructureRelated, NoDeleteMixin):\n\n name = models.CharField(verbose_name=_(\"Name\"), max_length=128)\n begin_year = models.IntegerField(verbose_name=_(\"Begin year\"))\n end_year = models.IntegerField(verbose_name=_(\"End year\"), blank=True, null=True)\n constraint = models.TextField(verbose_name=_(\"Constraint\"), blank=True,\n help_text=_(\"Specific conditions, ...\"))\n global_cost = models.FloatField(verbose_name=_(\"Global cost\"), default=0,\n blank=True, null=True, help_text=_(\"€\"))\n comments = models.TextField(verbose_name=_(\"Comments\"), blank=True,\n help_text=_(\"Remarks and notes\"))\n type = models.ForeignKey('ProjectType', null=True, blank=True, on_delete=models.CASCADE,\n verbose_name=_(\"Type\"))\n domain = models.ForeignKey('ProjectDomain', null=True, blank=True, on_delete=models.CASCADE,\n verbose_name=_(\"Domain\"))\n contractors = models.ManyToManyField('Contractor', related_name=\"projects\", blank=True,\n verbose_name=_(\"Contractors\"))\n project_owner = models.ForeignKey(Organism, related_name='own', blank=True, null=True, on_delete=models.CASCADE,\n verbose_name=_(\"Project owner\"))\n project_manager = models.ForeignKey(Organism, related_name='manage', blank=True, null=True, on_delete=models.CASCADE,\n verbose_name=_(\"Project manager\"))\n founders = models.ManyToManyField(Organism, through='Funding', verbose_name=_(\"Founders\"))\n eid = models.CharField(verbose_name=_(\"External id\"), max_length=1024, blank=True, null=True)\n\n objects = ProjectManager()\n\n 
class Meta:\n verbose_name = _(\"Project\")\n verbose_name_plural = _(\"Projects\")\n ordering = ['-begin_year', 'name']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._geom = None\n\n @property\n def paths(self):\n s = []\n for i in self.interventions.existing():\n s += i.paths\n return Path.objects.filter(pk__in=[p.pk for p in set(s)])\n\n @property\n def trails(self):\n s = []\n for i in self.interventions.existing():\n for p in i.target.paths.all():\n for t in p.trails.all():\n s.append(t.pk)\n\n return Trail.objects.filter(pk__in=s)\n\n @property\n def signages(self):\n from geotrek.signage.models import Signage\n target_ids = self.interventions.existing().filter(target_type=ContentType.objects.get_for_model(Signage)).values_list('target_id', flat=True)\n return list(Signage.objects.filter(topo_object__in=target_ids))\n\n @property\n def infrastructures(self):\n from geotrek.infrastructure.models import Infrastructure\n target_ids = list(self.interventions.existing().filter(target_type=ContentType.objects.get_for_model(Infrastructure)).values_list('target_id', flat=True))\n return list(Infrastructure.objects.filter(topo_object__in=target_ids))\n\n @classproperty\n def geomfield(cls):\n from django.contrib.gis.geos import LineString\n # Fake field, TODO: still better than overkill code in views, but can do neater.\n c = GeometryCollection([LineString((0, 0), (1, 1))], srid=settings.SRID)\n c.name = 'geom'\n return c\n\n @property\n def geom(self):\n \"\"\" Merge all interventions geometry into a collection\n \"\"\"\n if self._geom is None:\n interventions = Intervention.objects.existing().filter(project=self)\n geoms = [i.geom for i in interventions if i.geom is not None]\n if geoms:\n self._geom = GeometryCollection(*geoms, srid=settings.SRID)\n return self._geom\n\n @property\n def api_geom(self):\n if not self.geom:\n return None\n return self.geom.transform(settings.API_SRID, clone=True)\n\n @geom.setter\n def geom(self, value):\n self._geom = value\n\n @property\n def name_display(self):\n return '<a data-pk=\"%s\" href=\"%s\" title=\"%s\">%s</a>' % (self.pk,\n self.get_detail_url(),\n self.name,\n self.name)\n\n @property\n def name_csv_display(self):\n return self.name\n\n @property\n def interventions_csv_display(self):\n return [str(i) for i in self.interventions.existing()]\n\n @property\n def contractors_display(self):\n return [str(c) for c in self.contractors.all()]\n\n @property\n def founders_display(self):\n return [str(f) for f in self.founders.all()]\n\n @property\n def period(self):\n return \"%s - %s\" % (self.begin_year, self.end_year or \"\")\n\n @property\n def period_display(self):\n return self.period\n\n @classproperty\n def period_verbose_name(cls):\n return _(\"Period\")\n\n @property\n def interventions_total_cost(self):\n total = 0\n qs = self.interventions.existing()\n for i in qs.prefetch_related('manday_set', 'manday_set__job'):\n total += i.total_cost\n return total\n\n @classproperty\n def interventions_total_cost_verbose_name(cls):\n return _(\"Interventions total cost\")\n\n def __str__(self):\n return \"%s - %s\" % (self.begin_year, self.name)\n\n @classmethod\n def path_projects(cls, path):\n return cls.objects.existing().filter(interventions__in=path.interventions.all()).distinct()\n\n @classmethod\n def topology_projects(cls, topology):\n return cls.objects.existing().filter(interventions__in=topology.interventions.all()).distinct()\n\n def edges_by_attr(self, interventionattr):\n \"\"\" Return related 
topology objects of project, by aggregating the same attribute\n on its interventions.\n (See geotrek.land.models)\n \"\"\"\n pks = []\n modelclass = Topology\n for i in self.interventions.all():\n attr_value = getattr(i, interventionattr)\n if isinstance(attr_value, list):\n pks += [o.pk for o in attr_value]\n else:\n modelclass = attr_value.model\n topologies = attr_value.values('id')\n for topology in topologies:\n pks.append(topology['id'])\n return modelclass.objects.filter(pk__in=pks)\n\n @classmethod\n def get_create_label(cls):\n return _(\"Add a new project\")\n\n\nPath.add_property('projects', lambda self: Project.path_projects(self), _(\"Projects\"))\nTopology.add_property('projects', lambda self: Project.topology_projects(self), _(\"Projects\"))\n\n\nclass ProjectType(StructureOrNoneRelated):\n\n type = models.CharField(max_length=128, verbose_name=_(\"Type\"))\n\n class Meta:\n verbose_name = _(\"Project type\")\n verbose_name_plural = _(\"Project types\")\n ordering = ['type']\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.type, self.structure.name)\n return self.type\n\n\nclass ProjectDomain(StructureOrNoneRelated):\n\n domain = models.CharField(max_length=128, verbose_name=_(\"Domain\"))\n\n class Meta:\n verbose_name = _(\"Project domain\")\n verbose_name_plural = _(\"Project domains\")\n ordering = ['domain']\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.domain, self.structure.name)\n return self.domain\n\n\nclass Contractor(StructureOrNoneRelated):\n\n contractor = models.CharField(max_length=128, verbose_name=_(\"Contractor\"))\n\n class Meta:\n verbose_name = _(\"Contractor\")\n verbose_name_plural = _(\"Contractors\")\n ordering = ['contractor']\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.contractor, self.structure.name)\n return self.contractor\n\n\nclass Funding(models.Model):\n\n amount = models.FloatField(verbose_name=_(\"Amount\"))\n project = models.ForeignKey(Project, verbose_name=_(\"Project\"), on_delete=models.CASCADE)\n organism = models.ForeignKey(Organism, verbose_name=_(\"Organism\"), on_delete=models.CASCADE)\n\n class Meta:\n verbose_name = _(\"Funding\")\n verbose_name_plural = _(\"Fundings\")\n\n def __str__(self):\n return \"%s : %s\" % (self.project, self.amount)\n",
"path": "geotrek/maintenance/models.py"
}
] | [
{
"content": "import os\nfrom datetime import datetime\n\nfrom django.db.models import Q, Min, Max\nfrom django.db.models.functions import ExtractYear\nfrom django.conf import settings\nfrom django.utils.translation import gettext_lazy as _\nfrom django.contrib.contenttypes.fields import GenericForeignKey\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.gis.db import models\nfrom django.contrib.gis.geos import GeometryCollection\n\nfrom mapentity.models import MapEntityMixin\n\nfrom geotrek.authent.models import StructureRelated, StructureOrNoneRelated\nfrom geotrek.altimetry.models import AltimetryMixin\nfrom geotrek.core.models import Topology, Path, Trail\nfrom geotrek.common.models import Organism\nfrom geotrek.common.mixins import TimeStampedModelMixin, NoDeleteMixin, AddPropertyMixin, NoDeleteManager\nfrom geotrek.common.utils import classproperty\nfrom geotrek.infrastructure.models import Infrastructure\nfrom geotrek.signage.models import Signage\nfrom geotrek.zoning.mixins import ZoningPropertiesMixin\n\nif 'geotrek.signage' in settings.INSTALLED_APPS:\n from geotrek.signage.models import Blade\n\n\nclass InterventionManager(NoDeleteManager):\n def year_choices(self):\n return self.existing().filter(date__isnull=False).annotate(year=ExtractYear('date')) \\\n .order_by('-year').distinct().values_list('year', 'year')\n\n\nclass Intervention(ZoningPropertiesMixin, AddPropertyMixin, MapEntityMixin, AltimetryMixin,\n TimeStampedModelMixin, StructureRelated, NoDeleteMixin):\n\n target_type = models.ForeignKey(ContentType, null=True, on_delete=models.CASCADE)\n target_id = models.PositiveIntegerField(blank=True, null=True)\n target = GenericForeignKey('target_type', 'target_id')\n\n name = models.CharField(verbose_name=_(\"Name\"), max_length=128, help_text=_(\"Brief summary\"))\n date = models.DateField(default=datetime.now, verbose_name=_(\"Date\"), help_text=_(\"When ?\"))\n subcontracting = models.BooleanField(verbose_name=_(\"Subcontracting\"), default=False)\n\n # Technical information\n width = models.FloatField(default=0.0, blank=True, null=True, verbose_name=_(\"Width\"))\n height = models.FloatField(default=0.0, blank=True, null=True, verbose_name=_(\"Height\"))\n area = models.FloatField(editable=False, default=0, blank=True, null=True, verbose_name=_(\"Area\"))\n\n # Costs\n material_cost = models.FloatField(default=0.0, blank=True, null=True, verbose_name=_(\"Material cost\"))\n heliport_cost = models.FloatField(default=0.0, blank=True, null=True, verbose_name=_(\"Heliport cost\"))\n subcontract_cost = models.FloatField(default=0.0, blank=True, null=True, verbose_name=_(\"Subcontract cost\"))\n\n # AltimetyMixin for denormalized fields from related topology, updated via trigger.\n length = models.FloatField(editable=True, default=0.0, null=True, blank=True, verbose_name=_(\"3D Length\"))\n\n stake = models.ForeignKey('core.Stake', null=True, blank=True, on_delete=models.CASCADE,\n related_name='interventions', verbose_name=_(\"Stake\"))\n\n status = models.ForeignKey('InterventionStatus', verbose_name=_(\"Status\"), on_delete=models.CASCADE)\n\n type = models.ForeignKey('InterventionType', null=True, blank=True, on_delete=models.CASCADE,\n verbose_name=_(\"Type\"))\n\n disorders = models.ManyToManyField('InterventionDisorder', related_name=\"interventions\",\n verbose_name=_(\"Disorders\"), blank=True)\n\n jobs = models.ManyToManyField('InterventionJob', through='ManDay', verbose_name=_(\"Jobs\"))\n\n project = models.ForeignKey('Project', 
null=True, blank=True, related_name=\"interventions\",\n on_delete=models.CASCADE, verbose_name=_(\"Project\"))\n description = models.TextField(blank=True, verbose_name=_(\"Description\"), help_text=_(\"Remarks and notes\"))\n\n eid = models.CharField(verbose_name=_(\"External id\"), max_length=1024, blank=True, null=True)\n\n objects = InterventionManager()\n\n class Meta:\n verbose_name = _(\"Intervention\")\n verbose_name_plural = _(\"Interventions\")\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._geom = None\n\n def default_stake(self):\n stake = None\n if self.target and isinstance(self.target, Topology):\n for path in self.target.paths.exclude(stake=None):\n if path.stake > stake:\n stake = path.stake\n return stake\n\n def reload(self):\n if self.pk:\n fromdb = self.__class__.objects.get(pk=self.pk)\n self.area = fromdb.area\n AltimetryMixin.reload(self, fromdb)\n TimeStampedModelMixin.reload(self, fromdb)\n NoDeleteMixin.reload(self, fromdb)\n if isinstance(self.target, Topology):\n self.target.reload()\n return self\n\n def save(self, *args, **kwargs):\n if self.stake is None:\n self.stake = self.default_stake()\n\n super().save(*args, **kwargs)\n\n # Invalidate project map\n if self.project:\n try:\n os.remove(self.project.get_map_image_path())\n except OSError:\n pass\n\n self.reload()\n\n @classproperty\n def target_verbose_name(cls):\n return _(\"On\")\n\n @property\n def target_display(self):\n icon = 'path'\n title = _('Paths')\n if not self.target._meta.model_name == \"topology\":\n icon = self.target._meta.model_name\n\n title = self.target.name_display\n return '<img src=\"%simages/%s-16.png\"> %s' % (settings.STATIC_URL,\n icon,\n title)\n\n @property\n def target_csv_display(self):\n return \"%s: %s (%s)\" % (\n _(self.target._meta.verbose_name),\n self.target,\n self.target.pk)\n\n @property\n def in_project(self):\n return self.project is not None\n\n @property\n def paths(self):\n if self.target._meta.model_name == 'blade':\n return self.target.signage.paths.all()\n if self.target:\n return self.target.paths.all()\n return Path.objects.none()\n\n @property\n def total_manday(self):\n total = 0.0\n for md in self.manday_set.all():\n total += float(md.nb_days)\n return total\n\n @classproperty\n def total_manday_verbose_name(cls):\n return _(\"Mandays\")\n\n @property\n def total_cost_mandays(self):\n total = 0.0\n for md in self.manday_set.all():\n total += md.cost\n return total\n\n @classproperty\n def total_cost_mandays_verbose_name(cls):\n return _(\"Mandays cost\")\n\n @property\n def total_cost(self):\n return self.total_cost_mandays + \\\n (self.material_cost or 0) + \\\n (self.heliport_cost or 0) + \\\n (self.subcontract_cost or 0)\n\n @classproperty\n def total_cost_verbose_name(cls):\n return _(\"Total cost\")\n\n @classproperty\n def geomfield(cls):\n return Topology._meta.get_field('geom')\n\n @property\n def geom(self):\n if self._geom is None:\n if self.target:\n self._geom = self.target.geom\n return self._geom\n\n @geom.setter\n def geom(self, value):\n self._geom = value\n\n @property\n def api_geom(self):\n if not self.geom:\n return None\n return self.geom.transform(settings.API_SRID, clone=True)\n\n @property\n def name_display(self):\n return '<a data-pk=\"%s\" href=\"%s\" title=\"%s\" >%s</a>' % (self.pk,\n self.get_detail_url(),\n self.name,\n self.name)\n\n @property\n def name_csv_display(self):\n return self.name\n\n def __str__(self):\n return \"%s (%s)\" % (self.name, self.date)\n\n @classmethod\n def 
get_interventions(cls, obj):\n blade_content_type = ContentType.objects.get_for_model(Blade)\n non_topology_content_types = [blade_content_type]\n if 'geotrek.outdoor' in settings.INSTALLED_APPS:\n non_topology_content_types += [\n ContentType.objects.get_by_natural_key('outdoor', 'site'),\n ContentType.objects.get_by_natural_key('outdoor', 'course'),\n ]\n if settings.TREKKING_TOPOLOGY_ENABLED:\n topologies = list(Topology.overlapping(obj).values_list('pk', flat=True))\n else:\n area = obj.geom.buffer(settings.INTERVENTION_INTERSECTION_MARGIN)\n topologies = list(Topology.objects.existing().filter(geom__intersects=area).values_list('pk', flat=True))\n qs = Q(target_id__in=topologies) & ~Q(target_type__in=non_topology_content_types)\n if 'geotrek.signage' in settings.INSTALLED_APPS:\n blades = list(Blade.objects.filter(signage__in=topologies).values_list('id', flat=True))\n qs |= Q(target_id__in=blades, target_type=blade_content_type)\n return Intervention.objects.existing().filter(qs).distinct('pk')\n\n @classmethod\n def path_interventions(cls, path):\n blade_content_type = ContentType.objects.get_for_model(Blade)\n non_topology_content_types = [blade_content_type]\n if 'geotrek.outdoor' in settings.INSTALLED_APPS:\n non_topology_content_types += [\n ContentType.objects.get_by_natural_key('outdoor', 'site'),\n ContentType.objects.get_by_natural_key('outdoor', 'course'),\n ]\n topologies = list(Topology.objects.filter(aggregations__path=path).values_list('pk', flat=True))\n qs = Q(target_id__in=topologies) & ~Q(target_type__in=non_topology_content_types)\n if 'geotrek.signage' in settings.INSTALLED_APPS:\n blades = list(Blade.objects.filter(signage__in=topologies).values_list('id', flat=True))\n qs |= Q(target_id__in=blades, target_type=blade_content_type)\n return Intervention.objects.existing().filter(qs).distinct('pk')\n\n @classmethod\n def topology_interventions(cls, topology):\n return cls.get_interventions(topology)\n\n @classmethod\n def blade_interventions(cls, blade):\n return cls.get_interventions(blade.signage)\n\n @property\n def signages(self):\n if self.target_type == ContentType.objects.get_for_model(Signage):\n return [self.target]\n return []\n\n @property\n def infrastructures(self):\n if self.target_type == ContentType.objects.get_for_model(Infrastructure):\n return [self.target]\n return []\n\n def distance(self, to_cls):\n \"\"\"Distance to associate this intervention to another class\"\"\"\n return settings.MAINTENANCE_INTERSECTION_MARGIN\n\n\nPath.add_property('interventions', lambda self: Intervention.path_interventions(self), _(\"Interventions\"))\nTopology.add_property('interventions', lambda self: Intervention.topology_interventions(self), _(\"Interventions\"))\nif 'geotrek.signage' in settings.INSTALLED_APPS:\n Blade.add_property('interventions', lambda self: Intervention.blade_interventions(self), _(\"Interventions\"))\n\n\nclass InterventionStatus(StructureOrNoneRelated):\n\n status = models.CharField(verbose_name=_(\"Status\"), max_length=128)\n order = models.PositiveSmallIntegerField(default=None, null=True, blank=True, verbose_name=_(\"Display order\"))\n\n class Meta:\n verbose_name = _(\"Intervention's status\")\n verbose_name_plural = _(\"Intervention's statuses\")\n ordering = ['order', 'status']\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.status, self.structure.name)\n return self.status\n\n\nclass InterventionType(StructureOrNoneRelated):\n\n type = models.CharField(max_length=128, verbose_name=_(\"Type\"))\n\n 
class Meta:\n verbose_name = _(\"Intervention's type\")\n verbose_name_plural = _(\"Intervention's types\")\n ordering = ['type']\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.type, self.structure.name)\n return self.type\n\n\nclass InterventionDisorder(StructureOrNoneRelated):\n\n disorder = models.CharField(max_length=128, verbose_name=_(\"Disorder\"))\n\n class Meta:\n verbose_name = _(\"Intervention's disorder\")\n verbose_name_plural = _(\"Intervention's disorders\")\n ordering = ['disorder']\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.disorder, self.structure.name)\n return self.disorder\n\n\nclass InterventionJob(StructureOrNoneRelated):\n\n job = models.CharField(max_length=128, verbose_name=_(\"Job\"))\n cost = models.DecimalField(verbose_name=_(\"Cost\"), default=1.0, decimal_places=2, max_digits=8)\n\n class Meta:\n verbose_name = _(\"Intervention's job\")\n verbose_name_plural = _(\"Intervention's jobs\")\n ordering = ['job']\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.job, self.structure.name)\n return self.job\n\n\nclass ManDay(models.Model):\n\n nb_days = models.DecimalField(verbose_name=_(\"Mandays\"), decimal_places=2, max_digits=6)\n intervention = models.ForeignKey(Intervention, on_delete=models.CASCADE)\n job = models.ForeignKey(InterventionJob, verbose_name=_(\"Job\"), on_delete=models.CASCADE)\n\n class Meta:\n verbose_name = _(\"Manday\")\n verbose_name_plural = _(\"Mandays\")\n\n @property\n def cost(self):\n return float(self.nb_days * self.job.cost)\n\n def __str__(self):\n return str(self.nb_days)\n\n\nclass ProjectManager(NoDeleteManager):\n def year_choices(self):\n bounds = self.existing().aggregate(min=Min('begin_year'), max=Max('end_year'))\n if not bounds['min'] or not bounds['max']:\n return []\n return [(year, year) for year in range(bounds['min'], bounds['max'] + 1)]\n\n\nclass Project(ZoningPropertiesMixin, AddPropertyMixin, MapEntityMixin, TimeStampedModelMixin,\n StructureRelated, NoDeleteMixin):\n\n name = models.CharField(verbose_name=_(\"Name\"), max_length=128)\n begin_year = models.IntegerField(verbose_name=_(\"Begin year\"))\n end_year = models.IntegerField(verbose_name=_(\"End year\"), blank=True, null=True)\n constraint = models.TextField(verbose_name=_(\"Constraint\"), blank=True,\n help_text=_(\"Specific conditions, ...\"))\n global_cost = models.FloatField(verbose_name=_(\"Global cost\"), default=0,\n blank=True, null=True, help_text=_(\"€\"))\n comments = models.TextField(verbose_name=_(\"Comments\"), blank=True,\n help_text=_(\"Remarks and notes\"))\n type = models.ForeignKey('ProjectType', null=True, blank=True, on_delete=models.CASCADE,\n verbose_name=_(\"Type\"))\n domain = models.ForeignKey('ProjectDomain', null=True, blank=True, on_delete=models.CASCADE,\n verbose_name=_(\"Domain\"))\n contractors = models.ManyToManyField('Contractor', related_name=\"projects\", blank=True,\n verbose_name=_(\"Contractors\"))\n project_owner = models.ForeignKey(Organism, related_name='own', blank=True, null=True, on_delete=models.CASCADE,\n verbose_name=_(\"Project owner\"))\n project_manager = models.ForeignKey(Organism, related_name='manage', blank=True, null=True, on_delete=models.CASCADE,\n verbose_name=_(\"Project manager\"))\n founders = models.ManyToManyField(Organism, through='Funding', verbose_name=_(\"Founders\"))\n eid = models.CharField(verbose_name=_(\"External id\"), max_length=1024, blank=True, null=True)\n\n objects = ProjectManager()\n\n 
class Meta:\n verbose_name = _(\"Project\")\n verbose_name_plural = _(\"Projects\")\n ordering = ['-begin_year', 'name']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._geom = None\n\n @property\n def paths(self):\n s = []\n for i in self.interventions.existing():\n s += i.paths\n return Path.objects.filter(pk__in=[p.pk for p in set(s)])\n\n @property\n def trails(self):\n s = []\n for i in self.interventions.existing():\n for p in i.target.paths.all():\n for t in p.trails.all():\n s.append(t.pk)\n\n return Trail.objects.filter(pk__in=s)\n\n @property\n def signages(self):\n from geotrek.signage.models import Signage\n target_ids = self.interventions.existing().filter(target_type=ContentType.objects.get_for_model(Signage)).values_list('target_id', flat=True)\n return list(Signage.objects.filter(topo_object__in=target_ids))\n\n @property\n def infrastructures(self):\n from geotrek.infrastructure.models import Infrastructure\n target_ids = list(self.interventions.existing().filter(target_type=ContentType.objects.get_for_model(Infrastructure)).values_list('target_id', flat=True))\n return list(Infrastructure.objects.filter(topo_object__in=target_ids))\n\n @classproperty\n def geomfield(cls):\n from django.contrib.gis.geos import LineString\n # Fake field, TODO: still better than overkill code in views, but can do neater.\n c = GeometryCollection([LineString((0, 0), (1, 1))], srid=settings.SRID)\n c.name = 'geom'\n return c\n\n @property\n def geom(self):\n \"\"\" Merge all interventions geometry into a collection\n \"\"\"\n if self._geom is None:\n interventions = Intervention.objects.existing().filter(project=self)\n geoms = [i.geom for i in interventions if i.geom is not None]\n if geoms:\n self._geom = GeometryCollection(*geoms, srid=settings.SRID)\n return self._geom\n\n @property\n def api_geom(self):\n if not self.geom:\n return None\n return self.geom.transform(settings.API_SRID, clone=True)\n\n @geom.setter\n def geom(self, value):\n self._geom = value\n\n @property\n def name_display(self):\n return '<a data-pk=\"%s\" href=\"%s\" title=\"%s\">%s</a>' % (self.pk,\n self.get_detail_url(),\n self.name,\n self.name)\n\n @property\n def name_csv_display(self):\n return self.name\n\n @property\n def interventions_csv_display(self):\n return [str(i) for i in self.interventions.existing()]\n\n @property\n def contractors_display(self):\n return [str(c) for c in self.contractors.all()]\n\n @property\n def founders_display(self):\n return [str(f) for f in self.founders.all()]\n\n @property\n def period(self):\n return \"%s - %s\" % (self.begin_year, self.end_year or \"\")\n\n @property\n def period_display(self):\n return self.period\n\n @classproperty\n def period_verbose_name(cls):\n return _(\"Period\")\n\n @property\n def interventions_total_cost(self):\n total = 0\n qs = self.interventions.existing()\n for i in qs.prefetch_related('manday_set', 'manday_set__job'):\n total += i.total_cost\n return total\n\n @classproperty\n def interventions_total_cost_verbose_name(cls):\n return _(\"Interventions total cost\")\n\n def __str__(self):\n return \"%s - %s\" % (self.begin_year, self.name)\n\n @classmethod\n def path_projects(cls, path):\n return cls.objects.existing().filter(interventions__in=path.interventions.all()).distinct()\n\n @classmethod\n def topology_projects(cls, topology):\n return cls.objects.existing().filter(interventions__in=topology.interventions.all()).distinct()\n\n def edges_by_attr(self, interventionattr):\n \"\"\" Return related 
topology objects of project, by aggregating the same attribute\n on its interventions.\n (See geotrek.land.models)\n \"\"\"\n pks = []\n modelclass = Topology\n for i in self.interventions.all():\n attr_value = getattr(i, interventionattr)\n if isinstance(attr_value, list):\n pks += [o.pk for o in attr_value]\n else:\n modelclass = attr_value.model\n topologies = attr_value.values('id')\n for topology in topologies:\n pks.append(topology['id'])\n return modelclass.objects.filter(pk__in=pks)\n\n @classmethod\n def get_create_label(cls):\n return _(\"Add a new project\")\n\n\nPath.add_property('projects', lambda self: Project.path_projects(self), _(\"Projects\"))\nTopology.add_property('projects', lambda self: Project.topology_projects(self), _(\"Projects\"))\n\n\nclass ProjectType(StructureOrNoneRelated):\n\n type = models.CharField(max_length=128, verbose_name=_(\"Type\"))\n\n class Meta:\n verbose_name = _(\"Project type\")\n verbose_name_plural = _(\"Project types\")\n ordering = ['type']\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.type, self.structure.name)\n return self.type\n\n\nclass ProjectDomain(StructureOrNoneRelated):\n\n domain = models.CharField(max_length=128, verbose_name=_(\"Domain\"))\n\n class Meta:\n verbose_name = _(\"Project domain\")\n verbose_name_plural = _(\"Project domains\")\n ordering = ['domain']\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.domain, self.structure.name)\n return self.domain\n\n\nclass Contractor(StructureOrNoneRelated):\n\n contractor = models.CharField(max_length=128, verbose_name=_(\"Contractor\"))\n\n class Meta:\n verbose_name = _(\"Contractor\")\n verbose_name_plural = _(\"Contractors\")\n ordering = ['contractor']\n\n def __str__(self):\n if self.structure:\n return \"{} ({})\".format(self.contractor, self.structure.name)\n return self.contractor\n\n\nclass Funding(models.Model):\n\n amount = models.FloatField(verbose_name=_(\"Amount\"))\n project = models.ForeignKey(Project, verbose_name=_(\"Project\"), on_delete=models.CASCADE)\n organism = models.ForeignKey(Organism, verbose_name=_(\"Organism\"), on_delete=models.CASCADE)\n\n class Meta:\n verbose_name = _(\"Funding\")\n verbose_name_plural = _(\"Fundings\")\n\n def __str__(self):\n return \"%s : %s\" % (self.project, self.amount)\n",
"path": "geotrek/maintenance/models.py"
}
] | diff --git a/geotrek/maintenance/models.py b/geotrek/maintenance/models.py
index 195fc6e232..1b6a6b1862 100755
--- a/geotrek/maintenance/models.py
+++ b/geotrek/maintenance/models.py
@@ -179,9 +179,9 @@ def total_cost_mandays_verbose_name(cls):
@property
def total_cost(self):
return self.total_cost_mandays + \
- self.material_cost or 0 + \
- self.heliport_cost or 0 + \
- self.subcontract_cost or 0
+ (self.material_cost or 0) + \
+ (self.heliport_cost or 0) + \
+ (self.subcontract_cost or 0)
@classproperty
def total_cost_verbose_name(cls):
diff --git a/geotrek/maintenance/tests/test_intervention.py b/geotrek/maintenance/tests/test_intervention.py
index 4f8b4b110d..523016b556 100644
--- a/geotrek/maintenance/tests/test_intervention.py
+++ b/geotrek/maintenance/tests/test_intervention.py
@@ -198,3 +198,12 @@ def test_infrastructure_display_shows_object_name(self):
self.assertIn('signage-16.png', interv.target_display)
name = interv.target.name
self.assertIn(name, interv.target_display)
+
+ def test_total_cost(self):
+ interv = InfrastructureInterventionFactory.create(
+ material_cost=1,
+ heliport_cost=2,
+ subcontract_cost=4
+ # implicit 1 manday x 500 €
+ )
+ self.assertEqual(interv.total_cost, 507)
| Interventions - cost calculation
In the intervention module, there is something I don't quite understand about the cost calculation: the costs are not cumulative, and some cost items seem to take precedence over others.
For example, if I only enter a subcontract cost, the total is correct:
*(screenshot)*
If I also add a material cost, the subcontract cost is no longer taken into account:
*(screenshot)*
And if I enter everything, only the material and manday costs are counted:
*(screenshot)*
I can understand the original logic, assuming an intervention is either done by a subcontractor or handled internally, but there can be cases where a material cost comes on top of subcontracting, or even a technician's time as well. So, for clarity and to avoid tracking errors, wouldn't it be better to add all the costs together in the total cost?
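A minimal sketch of the operator-precedence behaviour at play here, with illustrative figures matching the test in the diff above (one implicit manday at 500 €, material 1, heliport 2, subcontract 4):

```python
# Illustration only: "or" binds more loosely than "+", so the original
# expression groups as (mandays + material) or (0 + heliport) or ...
mandays, material, heliport, subcontract = 500, 1, 2, 4

broken = mandays + material or 0 + heliport or 0 + subcontract or 0
print(broken)  # 501 -- the first truthy sum short-circuits the remaining costs

# The patched version wraps each optional cost in its own "(... or 0)"
fixed = mandays + (material or 0) + (heliport or 0) + (subcontract or 0)
print(fixed)   # 507 -- every cost is accumulated, matching the new test
```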
|
pallets__werkzeug-1798 | [
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"\n werkzeug.useragents\n ~~~~~~~~~~~~~~~~~~~\n\n This module provides a helper to inspect user agent strings. This module\n is far from complete but should work for most of the currently available\n browsers.\n\n\n :copyright: 2007 Pallets\n :license: BSD-3-Clause\n\"\"\"\nimport re\n\n\nclass UserAgentParser(object):\n \"\"\"A simple user agent parser. Used by the `UserAgent`.\"\"\"\n\n platforms = (\n (\" cros \", \"chromeos\"),\n (\"iphone|ios\", \"iphone\"),\n (\"ipad\", \"ipad\"),\n (r\"darwin|mac|os\\s*x\", \"macos\"),\n (\"win\", \"windows\"),\n (r\"android\", \"android\"),\n (\"netbsd\", \"netbsd\"),\n (\"openbsd\", \"openbsd\"),\n (\"freebsd\", \"freebsd\"),\n (\"dragonfly\", \"dragonflybsd\"),\n (\"(sun|i86)os\", \"solaris\"),\n (r\"x11|lin(\\b|ux)?\", \"linux\"),\n (r\"nintendo\\s+wii\", \"wii\"),\n (\"irix\", \"irix\"),\n (\"hp-?ux\", \"hpux\"),\n (\"aix\", \"aix\"),\n (\"sco|unix_sv\", \"sco\"),\n (\"bsd\", \"bsd\"),\n (\"amiga\", \"amiga\"),\n (\"blackberry|playbook\", \"blackberry\"),\n (\"symbian\", \"symbian\"),\n )\n browsers = (\n (\"googlebot\", \"google\"),\n (\"msnbot\", \"msn\"),\n (\"yahoo\", \"yahoo\"),\n (\"ask jeeves\", \"ask\"),\n (r\"aol|america\\s+online\\s+browser\", \"aol\"),\n (r\"opera|opr\", \"opera\"),\n (\"edge\", \"edge\"),\n (\"chrome|crios\", \"chrome\"),\n (\"seamonkey\", \"seamonkey\"),\n (\"firefox|firebird|phoenix|iceweasel\", \"firefox\"),\n (\"galeon\", \"galeon\"),\n (\"safari|version\", \"safari\"),\n (\"webkit\", \"webkit\"),\n (\"camino\", \"camino\"),\n (\"konqueror\", \"konqueror\"),\n (\"k-meleon\", \"kmeleon\"),\n (\"netscape\", \"netscape\"),\n (r\"msie|microsoft\\s+internet\\s+explorer|trident/.+? rv:\", \"msie\"),\n (\"lynx\", \"lynx\"),\n (\"links\", \"links\"),\n (\"Baiduspider\", \"baidu\"),\n (\"bingbot\", \"bing\"),\n (\"mozilla\", \"mozilla\"),\n )\n\n _browser_version_re = r\"(?:%s)[/\\sa-z(]*(\\d+[.\\da-z]+)?\"\n _language_re = re.compile(\n r\"(?:;\\s*|\\s+)(\\b\\w{2}\\b(?:-\\b\\w{2}\\b)?)\\s*;|\"\n r\"(?:\\(|\\[|;)\\s*(\\b\\w{2}\\b(?:-\\b\\w{2}\\b)?)\\s*(?:\\]|\\)|;)\"\n )\n\n def __init__(self):\n self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platforms]\n self.browsers = [\n (b, re.compile(self._browser_version_re % a, re.I))\n for a, b in self.browsers\n ]\n\n def __call__(self, user_agent):\n for platform, regex in self.platforms: # noqa: B007\n match = regex.search(user_agent)\n if match is not None:\n break\n else:\n platform = None\n for browser, regex in self.browsers: # noqa: B007\n match = regex.search(user_agent)\n if match is not None:\n version = match.group(1)\n break\n else:\n browser = version = None\n match = self._language_re.search(user_agent)\n if match is not None:\n language = match.group(1) or match.group(2)\n else:\n language = None\n return platform, browser, version, language\n\n\nclass UserAgent(object):\n \"\"\"Represents a user agent. Pass it a WSGI environment or a user agent\n string and you can inspect some of the details from the user agent\n string via the attributes. The following attributes exist:\n\n .. attribute:: string\n\n the raw user agent string\n\n .. attribute:: platform\n\n the browser platform. 
``None`` if not recognized.\n The following platforms are currently recognized:\n\n - `aix`\n - `amiga`\n - `android`\n - `blackberry`\n - `bsd`\n - `chromeos`\n - `dragonflybsd`\n - `freebsd`\n - `hpux`\n - `ipad`\n - `iphone`\n - `irix`\n - `linux`\n - `macos`\n - `netbsd`\n - `openbsd`\n - `sco`\n - `solaris`\n - `symbian`\n - `wii`\n - `windows`\n\n .. attribute:: browser\n\n the name of the browser. ``None`` if not recognized.\n The following browsers are currently recognized:\n\n - `aol` *\n - `ask` *\n - `baidu` *\n - `bing` *\n - `camino`\n - `chrome`\n - `edge`\n - `firefox`\n - `galeon`\n - `google` *\n - `kmeleon`\n - `konqueror`\n - `links`\n - `lynx`\n - `mozilla`\n - `msie`\n - `msn`\n - `netscape`\n - `opera`\n - `safari`\n - `seamonkey`\n - `webkit`\n - `yahoo` *\n\n (Browsers marked with a star (``*``) are crawlers.)\n\n .. attribute:: version\n\n the version of the browser. ``None`` if not recognized.\n\n .. attribute:: language\n\n the language of the browser. ``None`` if not recognized.\n \"\"\"\n\n _parser = UserAgentParser()\n\n def __init__(self, environ_or_string):\n if isinstance(environ_or_string, dict):\n environ_or_string = environ_or_string.get(\"HTTP_USER_AGENT\", \"\")\n self.string = environ_or_string\n self.platform, self.browser, self.version, self.language = self._parser(\n environ_or_string\n )\n\n def to_header(self):\n return self.string\n\n def __str__(self):\n return self.string\n\n def __nonzero__(self):\n return bool(self.browser)\n\n __bool__ = __nonzero__\n\n def __repr__(self):\n return \"<%s %r/%s>\" % (self.__class__.__name__, self.browser, self.version)\n",
"path": "src/werkzeug/useragents.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"\n werkzeug.useragents\n ~~~~~~~~~~~~~~~~~~~\n\n This module provides a helper to inspect user agent strings. This module\n is far from complete but should work for most of the currently available\n browsers.\n\n\n :copyright: 2007 Pallets\n :license: BSD-3-Clause\n\"\"\"\nimport re\n\n\nclass UserAgentParser(object):\n \"\"\"A simple user agent parser. Used by the `UserAgent`.\"\"\"\n\n platforms = (\n (\" cros \", \"chromeos\"),\n (\"iphone|ios\", \"iphone\"),\n (\"ipad\", \"ipad\"),\n (r\"darwin|mac|os\\s*x\", \"macos\"),\n (\"win\", \"windows\"),\n (r\"android\", \"android\"),\n (\"netbsd\", \"netbsd\"),\n (\"openbsd\", \"openbsd\"),\n (\"freebsd\", \"freebsd\"),\n (\"dragonfly\", \"dragonflybsd\"),\n (\"(sun|i86)os\", \"solaris\"),\n (r\"x11|lin(\\b|ux)?\", \"linux\"),\n (r\"nintendo\\s+wii\", \"wii\"),\n (\"irix\", \"irix\"),\n (\"hp-?ux\", \"hpux\"),\n (\"aix\", \"aix\"),\n (\"sco|unix_sv\", \"sco\"),\n (\"bsd\", \"bsd\"),\n (\"amiga\", \"amiga\"),\n (\"blackberry|playbook\", \"blackberry\"),\n (\"symbian\", \"symbian\"),\n )\n browsers = (\n (\"googlebot\", \"google\"),\n (\"msnbot\", \"msn\"),\n (\"yahoo\", \"yahoo\"),\n (\"ask jeeves\", \"ask\"),\n (r\"aol|america\\s+online\\s+browser\", \"aol\"),\n (r\"opera|opr\", \"opera\"),\n (\"edge|edg\", \"edge\"),\n (\"chrome|crios\", \"chrome\"),\n (\"seamonkey\", \"seamonkey\"),\n (\"firefox|firebird|phoenix|iceweasel\", \"firefox\"),\n (\"galeon\", \"galeon\"),\n (\"safari|version\", \"safari\"),\n (\"webkit\", \"webkit\"),\n (\"camino\", \"camino\"),\n (\"konqueror\", \"konqueror\"),\n (\"k-meleon\", \"kmeleon\"),\n (\"netscape\", \"netscape\"),\n (r\"msie|microsoft\\s+internet\\s+explorer|trident/.+? rv:\", \"msie\"),\n (\"lynx\", \"lynx\"),\n (\"links\", \"links\"),\n (\"Baiduspider\", \"baidu\"),\n (\"bingbot\", \"bing\"),\n (\"mozilla\", \"mozilla\"),\n )\n\n _browser_version_re = r\"(?:%s)[/\\sa-z(]*(\\d+[.\\da-z]+)?\"\n _language_re = re.compile(\n r\"(?:;\\s*|\\s+)(\\b\\w{2}\\b(?:-\\b\\w{2}\\b)?)\\s*;|\"\n r\"(?:\\(|\\[|;)\\s*(\\b\\w{2}\\b(?:-\\b\\w{2}\\b)?)\\s*(?:\\]|\\)|;)\"\n )\n\n def __init__(self):\n self.platforms = [(b, re.compile(a, re.I)) for a, b in self.platforms]\n self.browsers = [\n (b, re.compile(self._browser_version_re % a, re.I))\n for a, b in self.browsers\n ]\n\n def __call__(self, user_agent):\n for platform, regex in self.platforms: # noqa: B007\n match = regex.search(user_agent)\n if match is not None:\n break\n else:\n platform = None\n for browser, regex in self.browsers: # noqa: B007\n match = regex.search(user_agent)\n if match is not None:\n version = match.group(1)\n break\n else:\n browser = version = None\n match = self._language_re.search(user_agent)\n if match is not None:\n language = match.group(1) or match.group(2)\n else:\n language = None\n return platform, browser, version, language\n\n\nclass UserAgent(object):\n \"\"\"Represents a user agent. Pass it a WSGI environment or a user agent\n string and you can inspect some of the details from the user agent\n string via the attributes. The following attributes exist:\n\n .. attribute:: string\n\n the raw user agent string\n\n .. attribute:: platform\n\n the browser platform. 
``None`` if not recognized.\n The following platforms are currently recognized:\n\n - `aix`\n - `amiga`\n - `android`\n - `blackberry`\n - `bsd`\n - `chromeos`\n - `dragonflybsd`\n - `freebsd`\n - `hpux`\n - `ipad`\n - `iphone`\n - `irix`\n - `linux`\n - `macos`\n - `netbsd`\n - `openbsd`\n - `sco`\n - `solaris`\n - `symbian`\n - `wii`\n - `windows`\n\n .. attribute:: browser\n\n the name of the browser. ``None`` if not recognized.\n The following browsers are currently recognized:\n\n - `aol` *\n - `ask` *\n - `baidu` *\n - `bing` *\n - `camino`\n - `chrome`\n - `edge`\n - `firefox`\n - `galeon`\n - `google` *\n - `kmeleon`\n - `konqueror`\n - `links`\n - `lynx`\n - `mozilla`\n - `msie`\n - `msn`\n - `netscape`\n - `opera`\n - `safari`\n - `seamonkey`\n - `webkit`\n - `yahoo` *\n\n (Browsers marked with a star (``*``) are crawlers.)\n\n .. attribute:: version\n\n the version of the browser. ``None`` if not recognized.\n\n .. attribute:: language\n\n the language of the browser. ``None`` if not recognized.\n \"\"\"\n\n _parser = UserAgentParser()\n\n def __init__(self, environ_or_string):\n if isinstance(environ_or_string, dict):\n environ_or_string = environ_or_string.get(\"HTTP_USER_AGENT\", \"\")\n self.string = environ_or_string\n self.platform, self.browser, self.version, self.language = self._parser(\n environ_or_string\n )\n\n def to_header(self):\n return self.string\n\n def __str__(self):\n return self.string\n\n def __nonzero__(self):\n return bool(self.browser)\n\n __bool__ = __nonzero__\n\n def __repr__(self):\n return \"<%s %r/%s>\" % (self.__class__.__name__, self.browser, self.version)\n",
"path": "src/werkzeug/useragents.py"
}
] | diff --git a/CHANGES.rst b/CHANGES.rst
index 6193f3eec..d275a24e7 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -1,5 +1,12 @@
.. currentmodule:: werkzeug
+Version 1.0.2
+-------------
+
+- Add new "edg" identifier for Edge in UserAgentParser.
+ :issue:`1797`
+
+
Version 1.0.1
-------------
diff --git a/src/werkzeug/useragents.py b/src/werkzeug/useragents.py
index 6ef6e2b8e..5f8e6b211 100644
--- a/src/werkzeug/useragents.py
+++ b/src/werkzeug/useragents.py
@@ -47,7 +47,7 @@ class UserAgentParser(object):
("ask jeeves", "ask"),
(r"aol|america\s+online\s+browser", "aol"),
(r"opera|opr", "opera"),
- ("edge", "edge"),
+ ("edge|edg", "edge"),
("chrome|crios", "chrome"),
("seamonkey", "seamonkey"),
("firefox|firebird|phoenix|iceweasel", "firefox"),
diff --git a/tests/test_useragents.py b/tests/test_useragents.py
new file mode 100644
index 000000000..68e700c68
--- /dev/null
+++ b/tests/test_useragents.py
@@ -0,0 +1,44 @@
+# -*- coding: utf-8 -*-
+"""
+ tests.urls
+ ~~~~~~~~~~
+
+ URL helper tests.
+
+ :copyright: 2020 Pallets
+ :license: BSD-3-Clause
+"""
+import pytest
+
+from werkzeug import useragents
+
+
[email protected](
+ ("user_agent", "platform", "browser", "version", "language"),
+ (
+ (
+ "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36 Edge/13.10586", # noqa B950
+ "windows",
+ "edge",
+ "13.10586",
+ None,
+ ),
+ (
+ "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68", # noqa B950
+ "windows",
+ "edge",
+ "81.0.416.68",
+ None,
+ ),
+ (
+ "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4123.0 Safari/537.36 Edg/84.0.499.0 Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36", # noqa B950
+ "windows",
+ "edge",
+ "84.0.499.0",
+ None,
+ ),
+ ),
+)
+def test_edge_browsers(user_agent, platform, browser, version, language):
+ parsed = useragents.UserAgentParser()(user_agent)
+ assert parsed == (platform, browser, version, language)
| New Microsoft Edge User Agent
## Background
Microsoft Edge is now based on Chromium, and its user agent string has been updated.
`Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68`
## Simple Code
```python
@app.route('/browser')
def browser():
from flask import request
ua = request.user_agent
return jsonify({
'browser': ua.browser,
'platform': ua.platform,
'user_agent': ua.string,
'version': ua.version,
})
```
## Expected Result
```json
{
"browser": "edge",
"platform": "windows",
"user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68",
"version": "81.0.416.68"
}
```
| Key | Value |
| --- | --- |
| browser | **edge** |
| platform | windows |
| user_agent | Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68 |
| version | **81.0.416.68** |
## Actual Result
```json
{
"browser": "chrome",
"platform": "windows",
"user_agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68",
"version": "81.0.4044.129"
}
```
| Key | Value |
| --- | --- |
| browser | **chrome** |
| platform | windows |
| user_agent | Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68 |
| version | **81.0.4044.129** |
## Environment
- Windows 10 Pro 1909
- Python 3.6.6
- Werkzeug 0.16.1
- Flask 1.1.1
### Related Issues
#818, #1556
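For reference, a minimal sketch (reusing the version-template regex from `werkzeug.useragents` shown in the source above) of how widening the browser pattern to `edge|edg` picks up the new `Edg/` token and its version; the real `UserAgentParser` additionally detects the platform and tries the earlier browser patterns first.

```python
import re

# Version template from useragents.py, instantiated with the widened
# "edge|edg" alternative from the patch.
_browser_version_re = r"(?:%s)[/\sa-z(]*(\d+[.\da-z]+)?"
edge_re = re.compile(_browser_version_re % "edge|edg", re.I)

ua = (
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
    "(KHTML, like Gecko) Chrome/81.0.4044.129 Safari/537.36 Edg/81.0.416.68"
)
print(edge_re.search(ua).group(1))  # 81.0.416.68
```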
|
ansible-collections__community.general-1082 | [
{
"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2015, Paul Markham <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nDOCUMENTATION = r'''\n---\nmodule: solaris_zone\nshort_description: Manage Solaris zones\ndescription:\n - Create, start, stop and delete Solaris zones.\n - This module does not currently allow changing of options for a zone that is already been created.\nauthor:\n- Paul Markham (@pmarkham)\nrequirements:\n - Solaris 10 or 11\noptions:\n state:\n description:\n - C(present), configure and install the zone.\n - C(installed), synonym for C(present).\n - C(running), if the zone already exists, boot it, otherwise, configure and install\n the zone first, then boot it.\n - C(started), synonym for C(running).\n - C(stopped), shutdown a zone.\n - C(absent), destroy the zone.\n - C(configured), configure the ready so that it's to be attached.\n - C(attached), attach a zone, but do not boot it.\n - C(detached), shutdown and detach a zone\n type: str\n choices: [ absent, attached, configured, detached, installed, present, running, started, stopped ]\n default: present\n required: true\n name:\n description:\n - Zone name.\n - A zone name must be unique name.\n - A zone name must begin with an alpha-numeric character.\n - The name can contain alpha-numeric characters, underbars I(_), hyphens I(-), and periods I(.).\n - The name cannot be longer than 64 characters.\n type: str\n required: true\n path:\n description:\n - The path where the zone will be created. This is required when the zone is created, but not\n used otherwise.\n type: str\n sparse:\n description:\n - Whether to create a sparse (C(true)) or whole root (C(false)) zone.\n type: bool\n default: no\n root_password:\n description:\n - The password hash for the root account. If not specified, the zone's root account\n will not have a password.\n type: str\n config:\n description:\n - 'The zonecfg configuration commands for this zone. See zonecfg(1M) for the valid options\n and syntax. Typically this is a list of options separated by semi-colons or new lines, e.g.\n \"set auto-boot=true;add net;set physical=bge0;set address=10.1.1.1;end\"'\n type: str\n default: ''\n create_options:\n description:\n - 'Extra options to the zonecfg(1M) create command.'\n type: str\n default: ''\n install_options:\n description:\n - 'Extra options to the zoneadm(1M) install command. To automate Solaris 11 zone creation,\n use this to specify the profile XML file, e.g. install_options=\"-c sc_profile.xml\"'\n type: str\n default: ''\n attach_options:\n description:\n - 'Extra options to the zoneadm attach command. For example, this can be used to specify\n whether a minimum or full update of packages is required and if any packages need to\n be deleted. 
For valid values, see zoneadm(1M)'\n type: str\n default: ''\n timeout:\n description:\n - Timeout, in seconds, for zone to boot.\n type: int\n default: 600\n'''\n\nEXAMPLES = '''\n- name: Create and install a zone, but don't boot it\n community.general.solaris_zone:\n name: zone1\n state: present\n path: /zones/zone1\n sparse: True\n root_password: Be9oX7OSwWoU.\n config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'\n\n- name: Create and install a zone and boot it\n community.general.solaris_zone:\n name: zone1\n state: running\n path: /zones/zone1\n root_password: Be9oX7OSwWoU.\n config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'\n\n- name: Boot an already installed zone\n community.general.solaris_zone:\n name: zone1\n state: running\n\n- name: Stop a zone\n community.general.solaris_zone:\n name: zone1\n state: stopped\n\n- name: Destroy a zone\n community.general.solaris_zone:\n name: zone1\n state: absent\n\n- name: Detach a zone\n community.general.solaris_zone:\n name: zone1\n state: detached\n\n- name: Configure a zone, ready to be attached\n community.general.solaris_zone:\n name: zone1\n state: configured\n path: /zones/zone1\n root_password: Be9oX7OSwWoU.\n config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'\n\n- name: Attach zone1\n community.general.solaris_zone:\n name: zone1\n state: attached\n attach_options: -u\n'''\n\nimport os\nimport platform\nimport re\nimport tempfile\nimport time\n\nfrom ansible.module_utils.basic import AnsibleModule\n\n\nclass Zone(object):\n def __init__(self, module):\n self.changed = False\n self.msg = []\n\n self.module = module\n self.path = self.module.params['path']\n self.name = self.module.params['name']\n self.sparse = self.module.params['sparse']\n self.root_password = self.module.params['root_password']\n self.timeout = self.module.params['timeout']\n self.config = self.module.params['config']\n self.create_options = self.module.params['create_options']\n self.install_options = self.module.params['install_options']\n self.attach_options = self.module.params['attach_options']\n\n self.zoneadm_cmd = self.module.get_bin_path('zoneadm', True)\n self.zonecfg_cmd = self.module.get_bin_path('zonecfg', True)\n self.ssh_keygen_cmd = self.module.get_bin_path('ssh-keygen', True)\n\n if self.module.check_mode:\n self.msg.append('Running in check mode')\n\n if platform.system() != 'SunOS':\n self.module.fail_json(msg='This module requires Solaris')\n\n (self.os_major, self.os_minor) = platform.release().split('.')\n if int(self.os_minor) < 10:\n self.module.fail_json(msg='This module requires Solaris 10 or later')\n\n match = re.match('^[a-zA-Z0-9][-_.a-zA-Z0-9]{0,62}$', self.name)\n if not match:\n self.module.fail_json(msg=\"Provided zone name is not a valid zone name. 
\"\n \"Please refer documentation for correct zone name specifications.\")\n\n def configure(self):\n if not self.path:\n self.module.fail_json(msg='Missing required argument: path')\n\n if not self.module.check_mode:\n t = tempfile.NamedTemporaryFile(delete=False)\n\n if self.sparse:\n t.write('create %s\\n' % self.create_options)\n self.msg.append('creating sparse-root zone')\n else:\n t.write('create -b %s\\n' % self.create_options)\n self.msg.append('creating whole-root zone')\n\n t.write('set zonepath=%s\\n' % self.path)\n t.write('%s\\n' % self.config)\n t.close()\n\n cmd = '%s -z %s -f %s' % (self.zonecfg_cmd, self.name, t.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to create zone. %s' % (out + err))\n os.unlink(t.name)\n\n self.changed = True\n self.msg.append('zone configured')\n\n def install(self):\n if not self.module.check_mode:\n cmd = '%s -z %s install %s' % (self.zoneadm_cmd, self.name, self.install_options)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to install zone. %s' % (out + err))\n if int(self.os_minor) == 10:\n self.configure_sysid()\n self.configure_password()\n self.configure_ssh_keys()\n self.changed = True\n self.msg.append('zone installed')\n\n def uninstall(self):\n if self.is_installed():\n if not self.module.check_mode:\n cmd = '%s -z %s uninstall -F' % (self.zoneadm_cmd, self.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to uninstall zone. %s' % (out + err))\n self.changed = True\n self.msg.append('zone uninstalled')\n\n def configure_sysid(self):\n if os.path.isfile('%s/root/etc/.UNCONFIGURED' % self.path):\n os.unlink('%s/root/etc/.UNCONFIGURED' % self.path)\n\n open('%s/root/noautoshutdown' % self.path, 'w').close()\n\n node = open('%s/root/etc/nodename' % self.path, 'w')\n node.write(self.name)\n node.close()\n\n id = open('%s/root/etc/.sysIDtool.state' % self.path, 'w')\n id.write('1 # System previously configured?\\n')\n id.write('1 # Bootparams succeeded?\\n')\n id.write('1 # System is on a network?\\n')\n id.write('1 # Extended network information gathered?\\n')\n id.write('0 # Autobinder succeeded?\\n')\n id.write('1 # Network has subnets?\\n')\n id.write('1 # root password prompted for?\\n')\n id.write('1 # locale and term prompted for?\\n')\n id.write('1 # security policy in place\\n')\n id.write('1 # NFSv4 domain configured\\n')\n id.write('0 # Auto Registration Configured\\n')\n id.write('vt100')\n id.close()\n\n def configure_ssh_keys(self):\n rsa_key_file = '%s/root/etc/ssh/ssh_host_rsa_key' % self.path\n dsa_key_file = '%s/root/etc/ssh/ssh_host_dsa_key' % self.path\n\n if not os.path.isfile(rsa_key_file):\n cmd = '%s -f %s -t rsa -N \"\"' % (self.ssh_keygen_cmd, rsa_key_file)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to create rsa key. %s' % (out + err))\n\n if not os.path.isfile(dsa_key_file):\n cmd = '%s -f %s -t dsa -N \"\"' % (self.ssh_keygen_cmd, dsa_key_file)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to create dsa key. 
%s' % (out + err))\n\n def configure_password(self):\n shadow = '%s/root/etc/shadow' % self.path\n if self.root_password:\n f = open(shadow, 'r')\n lines = f.readlines()\n f.close()\n\n for i in range(0, len(lines)):\n fields = lines[i].split(':')\n if fields[0] == 'root':\n fields[1] = self.root_password\n lines[i] = ':'.join(fields)\n\n f = open(shadow, 'w')\n for line in lines:\n f.write(line)\n f.close()\n\n def boot(self):\n if not self.module.check_mode:\n cmd = '%s -z %s boot' % (self.zoneadm_cmd, self.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to boot zone. %s' % (out + err))\n\n \"\"\"\n The boot command can return before the zone has fully booted. This is especially\n true on the first boot when the zone initializes the SMF services. Unless the zone\n has fully booted, subsequent tasks in the playbook may fail as services aren't running yet.\n Wait until the zone's console login is running; once that's running, consider the zone booted.\n \"\"\"\n\n elapsed = 0\n while True:\n if elapsed > self.timeout:\n self.module.fail_json(msg='timed out waiting for zone to boot')\n rc = os.system('ps -z %s -o args|grep \"ttymon.*-d /dev/console\" > /dev/null 2>/dev/null' % self.name)\n if rc == 0:\n break\n time.sleep(10)\n elapsed += 10\n self.changed = True\n self.msg.append('zone booted')\n\n def destroy(self):\n if self.is_running():\n self.stop()\n if self.is_installed():\n self.uninstall()\n if not self.module.check_mode:\n cmd = '%s -z %s delete -F' % (self.zonecfg_cmd, self.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to delete zone. %s' % (out + err))\n self.changed = True\n self.msg.append('zone deleted')\n\n def stop(self):\n if not self.module.check_mode:\n cmd = '%s -z %s halt' % (self.zoneadm_cmd, self.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to stop zone. %s' % (out + err))\n self.changed = True\n self.msg.append('zone stopped')\n\n def detach(self):\n if not self.module.check_mode:\n cmd = '%s -z %s detach' % (self.zoneadm_cmd, self.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to detach zone. %s' % (out + err))\n self.changed = True\n self.msg.append('zone detached')\n\n def attach(self):\n if not self.module.check_mode:\n cmd = '%s -z %s attach %s' % (self.zoneadm_cmd, self.name, self.attach_options)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to attach zone. 
%s' % (out + err))\n self.changed = True\n self.msg.append('zone attached')\n\n def exists(self):\n cmd = '%s -z %s list' % (self.zoneadm_cmd, self.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc == 0:\n return True\n else:\n return False\n\n def is_running(self):\n return self.status() == 'running'\n\n def is_installed(self):\n return self.status() == 'installed'\n\n def is_configured(self):\n return self.status() == 'configured'\n\n def status(self):\n cmd = '%s -z %s list -p' % (self.zoneadm_cmd, self.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc == 0:\n return out.split(':')[2]\n else:\n return 'undefined'\n\n def state_present(self):\n if self.exists():\n self.msg.append('zone already exists')\n else:\n self.configure()\n self.install()\n\n def state_running(self):\n self.state_present()\n if self.is_running():\n self.msg.append('zone already running')\n else:\n self.boot()\n\n def state_stopped(self):\n if self.exists():\n self.stop()\n else:\n self.module.fail_json(msg='zone does not exist')\n\n def state_absent(self):\n if self.exists():\n if self.is_running():\n self.stop()\n self.destroy()\n else:\n self.msg.append('zone does not exist')\n\n def state_configured(self):\n if self.exists():\n self.msg.append('zone already exists')\n else:\n self.configure()\n\n def state_detached(self):\n if not self.exists():\n self.module.fail_json(msg='zone does not exist')\n if self.is_configured():\n self.msg.append('zone already detached')\n else:\n self.stop()\n self.detach()\n\n def state_attached(self):\n if not self.exists():\n self.msg.append('zone does not exist')\n if self.is_configured():\n self.attach()\n else:\n self.msg.append('zone already attached')\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n name=dict(type='str', required=True),\n state=dict(type='str', default='present',\n choices=['absent', 'attached', 'configured', 'detached', 'installed', 'present', 'running', 'started', 'stopped']),\n path=dict(type='str'),\n sparse=dict(type='bool', default=False),\n root_password=dict(type='str', no_log=True),\n timeout=dict(type='int', default=600),\n config=dict(type='str', default=''),\n create_options=dict(type='str', default=''),\n install_options=dict(type='str', default=''),\n attach_options=dict(type='str', default=''),\n ),\n supports_check_mode=True,\n )\n\n zone = Zone(module)\n\n state = module.params['state']\n\n if state == 'running' or state == 'started':\n zone.state_running()\n elif state == 'present' or state == 'installed':\n zone.state_present()\n elif state == 'stopped':\n zone.state_stopped()\n elif state == 'absent':\n zone.state_absent()\n elif state == 'configured':\n zone.state_configured()\n elif state == 'detached':\n zone.state_detached()\n elif state == 'attached':\n zone.state_attached()\n else:\n module.fail_json(msg='Invalid state: %s' % state)\n\n module.exit_json(changed=zone.changed, msg=', '.join(zone.msg))\n\n\nif __name__ == '__main__':\n main()\n",
"path": "plugins/modules/system/solaris_zone.py"
}
] | [
{
"content": "#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# Copyright: (c) 2015, Paul Markham <[email protected]>\n# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)\n\nfrom __future__ import absolute_import, division, print_function\n__metaclass__ = type\n\nDOCUMENTATION = r'''\n---\nmodule: solaris_zone\nshort_description: Manage Solaris zones\ndescription:\n - Create, start, stop and delete Solaris zones.\n - This module does not currently allow changing of options for a zone that is already been created.\nauthor:\n- Paul Markham (@pmarkham)\nrequirements:\n - Solaris 10 or 11\noptions:\n state:\n description:\n - C(present), configure and install the zone.\n - C(installed), synonym for C(present).\n - C(running), if the zone already exists, boot it, otherwise, configure and install\n the zone first, then boot it.\n - C(started), synonym for C(running).\n - C(stopped), shutdown a zone.\n - C(absent), destroy the zone.\n - C(configured), configure the ready so that it's to be attached.\n - C(attached), attach a zone, but do not boot it.\n - C(detached), shutdown and detach a zone\n type: str\n choices: [ absent, attached, configured, detached, installed, present, running, started, stopped ]\n default: present\n required: true\n name:\n description:\n - Zone name.\n - A zone name must be unique name.\n - A zone name must begin with an alpha-numeric character.\n - The name can contain alpha-numeric characters, underbars I(_), hyphens I(-), and periods I(.).\n - The name cannot be longer than 64 characters.\n type: str\n required: true\n path:\n description:\n - The path where the zone will be created. This is required when the zone is created, but not\n used otherwise.\n type: str\n sparse:\n description:\n - Whether to create a sparse (C(true)) or whole root (C(false)) zone.\n type: bool\n default: no\n root_password:\n description:\n - The password hash for the root account. If not specified, the zone's root account\n will not have a password.\n type: str\n config:\n description:\n - 'The zonecfg configuration commands for this zone. See zonecfg(1M) for the valid options\n and syntax. Typically this is a list of options separated by semi-colons or new lines, e.g.\n \"set auto-boot=true;add net;set physical=bge0;set address=10.1.1.1;end\"'\n type: str\n default: ''\n create_options:\n description:\n - 'Extra options to the zonecfg(1M) create command.'\n type: str\n default: ''\n install_options:\n description:\n - 'Extra options to the zoneadm(1M) install command. To automate Solaris 11 zone creation,\n use this to specify the profile XML file, e.g. install_options=\"-c sc_profile.xml\"'\n type: str\n default: ''\n attach_options:\n description:\n - 'Extra options to the zoneadm attach command. For example, this can be used to specify\n whether a minimum or full update of packages is required and if any packages need to\n be deleted. 
For valid values, see zoneadm(1M)'\n type: str\n default: ''\n timeout:\n description:\n - Timeout, in seconds, for zone to boot.\n type: int\n default: 600\n'''\n\nEXAMPLES = '''\n- name: Create and install a zone, but don't boot it\n community.general.solaris_zone:\n name: zone1\n state: present\n path: /zones/zone1\n sparse: True\n root_password: Be9oX7OSwWoU.\n config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'\n\n- name: Create and install a zone and boot it\n community.general.solaris_zone:\n name: zone1\n state: running\n path: /zones/zone1\n root_password: Be9oX7OSwWoU.\n config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'\n\n- name: Boot an already installed zone\n community.general.solaris_zone:\n name: zone1\n state: running\n\n- name: Stop a zone\n community.general.solaris_zone:\n name: zone1\n state: stopped\n\n- name: Destroy a zone\n community.general.solaris_zone:\n name: zone1\n state: absent\n\n- name: Detach a zone\n community.general.solaris_zone:\n name: zone1\n state: detached\n\n- name: Configure a zone, ready to be attached\n community.general.solaris_zone:\n name: zone1\n state: configured\n path: /zones/zone1\n root_password: Be9oX7OSwWoU.\n config: 'set autoboot=true; add net; set physical=bge0; set address=10.1.1.1; end'\n\n- name: Attach zone1\n community.general.solaris_zone:\n name: zone1\n state: attached\n attach_options: -u\n'''\n\nimport os\nimport platform\nimport re\nimport tempfile\nimport time\n\nfrom ansible.module_utils.basic import AnsibleModule\n\n\nclass Zone(object):\n def __init__(self, module):\n self.changed = False\n self.msg = []\n\n self.module = module\n self.path = self.module.params['path']\n self.name = self.module.params['name']\n self.sparse = self.module.params['sparse']\n self.root_password = self.module.params['root_password']\n self.timeout = self.module.params['timeout']\n self.config = self.module.params['config']\n self.create_options = self.module.params['create_options']\n self.install_options = self.module.params['install_options']\n self.attach_options = self.module.params['attach_options']\n\n self.zoneadm_cmd = self.module.get_bin_path('zoneadm', True)\n self.zonecfg_cmd = self.module.get_bin_path('zonecfg', True)\n self.ssh_keygen_cmd = self.module.get_bin_path('ssh-keygen', True)\n\n if self.module.check_mode:\n self.msg.append('Running in check mode')\n\n if platform.system() != 'SunOS':\n self.module.fail_json(msg='This module requires Solaris')\n\n (self.os_major, self.os_minor) = platform.release().split('.')\n if int(self.os_minor) < 10:\n self.module.fail_json(msg='This module requires Solaris 10 or later')\n\n match = re.match('^[a-zA-Z0-9][-_.a-zA-Z0-9]{0,62}$', self.name)\n if not match:\n self.module.fail_json(msg=\"Provided zone name is not a valid zone name. 
\"\n \"Please refer documentation for correct zone name specifications.\")\n\n def configure(self):\n if not self.path:\n self.module.fail_json(msg='Missing required argument: path')\n\n if not self.module.check_mode:\n t = tempfile.NamedTemporaryFile(delete=False, mode='wt')\n\n if self.sparse:\n t.write('create %s\\n' % self.create_options)\n self.msg.append('creating sparse-root zone')\n else:\n t.write('create -b %s\\n' % self.create_options)\n self.msg.append('creating whole-root zone')\n\n t.write('set zonepath=%s\\n' % self.path)\n t.write('%s\\n' % self.config)\n t.close()\n\n cmd = '%s -z %s -f %s' % (self.zonecfg_cmd, self.name, t.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to create zone. %s' % (out + err))\n os.unlink(t.name)\n\n self.changed = True\n self.msg.append('zone configured')\n\n def install(self):\n if not self.module.check_mode:\n cmd = '%s -z %s install %s' % (self.zoneadm_cmd, self.name, self.install_options)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to install zone. %s' % (out + err))\n if int(self.os_minor) == 10:\n self.configure_sysid()\n self.configure_password()\n self.configure_ssh_keys()\n self.changed = True\n self.msg.append('zone installed')\n\n def uninstall(self):\n if self.is_installed():\n if not self.module.check_mode:\n cmd = '%s -z %s uninstall -F' % (self.zoneadm_cmd, self.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to uninstall zone. %s' % (out + err))\n self.changed = True\n self.msg.append('zone uninstalled')\n\n def configure_sysid(self):\n if os.path.isfile('%s/root/etc/.UNCONFIGURED' % self.path):\n os.unlink('%s/root/etc/.UNCONFIGURED' % self.path)\n\n open('%s/root/noautoshutdown' % self.path, 'w').close()\n\n node = open('%s/root/etc/nodename' % self.path, 'w')\n node.write(self.name)\n node.close()\n\n id = open('%s/root/etc/.sysIDtool.state' % self.path, 'w')\n id.write('1 # System previously configured?\\n')\n id.write('1 # Bootparams succeeded?\\n')\n id.write('1 # System is on a network?\\n')\n id.write('1 # Extended network information gathered?\\n')\n id.write('0 # Autobinder succeeded?\\n')\n id.write('1 # Network has subnets?\\n')\n id.write('1 # root password prompted for?\\n')\n id.write('1 # locale and term prompted for?\\n')\n id.write('1 # security policy in place\\n')\n id.write('1 # NFSv4 domain configured\\n')\n id.write('0 # Auto Registration Configured\\n')\n id.write('vt100')\n id.close()\n\n def configure_ssh_keys(self):\n rsa_key_file = '%s/root/etc/ssh/ssh_host_rsa_key' % self.path\n dsa_key_file = '%s/root/etc/ssh/ssh_host_dsa_key' % self.path\n\n if not os.path.isfile(rsa_key_file):\n cmd = '%s -f %s -t rsa -N \"\"' % (self.ssh_keygen_cmd, rsa_key_file)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to create rsa key. %s' % (out + err))\n\n if not os.path.isfile(dsa_key_file):\n cmd = '%s -f %s -t dsa -N \"\"' % (self.ssh_keygen_cmd, dsa_key_file)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to create dsa key. 
%s' % (out + err))\n\n def configure_password(self):\n shadow = '%s/root/etc/shadow' % self.path\n if self.root_password:\n f = open(shadow, 'r')\n lines = f.readlines()\n f.close()\n\n for i in range(0, len(lines)):\n fields = lines[i].split(':')\n if fields[0] == 'root':\n fields[1] = self.root_password\n lines[i] = ':'.join(fields)\n\n f = open(shadow, 'w')\n for line in lines:\n f.write(line)\n f.close()\n\n def boot(self):\n if not self.module.check_mode:\n cmd = '%s -z %s boot' % (self.zoneadm_cmd, self.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to boot zone. %s' % (out + err))\n\n \"\"\"\n The boot command can return before the zone has fully booted. This is especially\n true on the first boot when the zone initializes the SMF services. Unless the zone\n has fully booted, subsequent tasks in the playbook may fail as services aren't running yet.\n Wait until the zone's console login is running; once that's running, consider the zone booted.\n \"\"\"\n\n elapsed = 0\n while True:\n if elapsed > self.timeout:\n self.module.fail_json(msg='timed out waiting for zone to boot')\n rc = os.system('ps -z %s -o args|grep \"ttymon.*-d /dev/console\" > /dev/null 2>/dev/null' % self.name)\n if rc == 0:\n break\n time.sleep(10)\n elapsed += 10\n self.changed = True\n self.msg.append('zone booted')\n\n def destroy(self):\n if self.is_running():\n self.stop()\n if self.is_installed():\n self.uninstall()\n if not self.module.check_mode:\n cmd = '%s -z %s delete -F' % (self.zonecfg_cmd, self.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to delete zone. %s' % (out + err))\n self.changed = True\n self.msg.append('zone deleted')\n\n def stop(self):\n if not self.module.check_mode:\n cmd = '%s -z %s halt' % (self.zoneadm_cmd, self.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to stop zone. %s' % (out + err))\n self.changed = True\n self.msg.append('zone stopped')\n\n def detach(self):\n if not self.module.check_mode:\n cmd = '%s -z %s detach' % (self.zoneadm_cmd, self.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to detach zone. %s' % (out + err))\n self.changed = True\n self.msg.append('zone detached')\n\n def attach(self):\n if not self.module.check_mode:\n cmd = '%s -z %s attach %s' % (self.zoneadm_cmd, self.name, self.attach_options)\n (rc, out, err) = self.module.run_command(cmd)\n if rc != 0:\n self.module.fail_json(msg='Failed to attach zone. 
%s' % (out + err))\n self.changed = True\n self.msg.append('zone attached')\n\n def exists(self):\n cmd = '%s -z %s list' % (self.zoneadm_cmd, self.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc == 0:\n return True\n else:\n return False\n\n def is_running(self):\n return self.status() == 'running'\n\n def is_installed(self):\n return self.status() == 'installed'\n\n def is_configured(self):\n return self.status() == 'configured'\n\n def status(self):\n cmd = '%s -z %s list -p' % (self.zoneadm_cmd, self.name)\n (rc, out, err) = self.module.run_command(cmd)\n if rc == 0:\n return out.split(':')[2]\n else:\n return 'undefined'\n\n def state_present(self):\n if self.exists():\n self.msg.append('zone already exists')\n else:\n self.configure()\n self.install()\n\n def state_running(self):\n self.state_present()\n if self.is_running():\n self.msg.append('zone already running')\n else:\n self.boot()\n\n def state_stopped(self):\n if self.exists():\n self.stop()\n else:\n self.module.fail_json(msg='zone does not exist')\n\n def state_absent(self):\n if self.exists():\n if self.is_running():\n self.stop()\n self.destroy()\n else:\n self.msg.append('zone does not exist')\n\n def state_configured(self):\n if self.exists():\n self.msg.append('zone already exists')\n else:\n self.configure()\n\n def state_detached(self):\n if not self.exists():\n self.module.fail_json(msg='zone does not exist')\n if self.is_configured():\n self.msg.append('zone already detached')\n else:\n self.stop()\n self.detach()\n\n def state_attached(self):\n if not self.exists():\n self.msg.append('zone does not exist')\n if self.is_configured():\n self.attach()\n else:\n self.msg.append('zone already attached')\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n name=dict(type='str', required=True),\n state=dict(type='str', default='present',\n choices=['absent', 'attached', 'configured', 'detached', 'installed', 'present', 'running', 'started', 'stopped']),\n path=dict(type='str'),\n sparse=dict(type='bool', default=False),\n root_password=dict(type='str', no_log=True),\n timeout=dict(type='int', default=600),\n config=dict(type='str', default=''),\n create_options=dict(type='str', default=''),\n install_options=dict(type='str', default=''),\n attach_options=dict(type='str', default=''),\n ),\n supports_check_mode=True,\n )\n\n zone = Zone(module)\n\n state = module.params['state']\n\n if state == 'running' or state == 'started':\n zone.state_running()\n elif state == 'present' or state == 'installed':\n zone.state_present()\n elif state == 'stopped':\n zone.state_stopped()\n elif state == 'absent':\n zone.state_absent()\n elif state == 'configured':\n zone.state_configured()\n elif state == 'detached':\n zone.state_detached()\n elif state == 'attached':\n zone.state_attached()\n else:\n module.fail_json(msg='Invalid state: %s' % state)\n\n module.exit_json(changed=zone.changed, msg=', '.join(zone.msg))\n\n\nif __name__ == '__main__':\n main()\n",
"path": "plugins/modules/system/solaris_zone.py"
}
] | diff --git a/changelogs/fragments/1081-solaris_zone-python3.yml b/changelogs/fragments/1081-solaris_zone-python3.yml
new file mode 100644
index 00000000000..40cd448f5eb
--- /dev/null
+++ b/changelogs/fragments/1081-solaris_zone-python3.yml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - solaris_zone - fixed issue trying to configure zone in Python 3 (https://github.com/ansible-collections/community.general/issues/1081).
diff --git a/plugins/modules/system/solaris_zone.py b/plugins/modules/system/solaris_zone.py
index c188fea3268..867b0df56a9 100644
--- a/plugins/modules/system/solaris_zone.py
+++ b/plugins/modules/system/solaris_zone.py
@@ -193,7 +193,7 @@ def configure(self):
self.module.fail_json(msg='Missing required argument: path')
if not self.module.check_mode:
- t = tempfile.NamedTemporaryFile(delete=False)
+ t = tempfile.NamedTemporaryFile(delete=False, mode='wt')
if self.sparse:
t.write('create %s\n' % self.create_options)
diff --git a/tests/unit/plugins/modules/system/test_solaris_zone.py b/tests/unit/plugins/modules/system/test_solaris_zone.py
new file mode 100644
index 00000000000..4cf5c5ff7a2
--- /dev/null
+++ b/tests/unit/plugins/modules/system/test_solaris_zone.py
@@ -0,0 +1,115 @@
+# Copyright (c) 2020 Justin Bronn <[email protected]>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import json
+import platform
+
+import pytest
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.general.plugins.modules.system import (
+ solaris_zone
+)
+from ansible_collections.community.general.tests.unit.plugins.modules.utils import (
+ set_module_args,
+)
+
+
+ZONEADM = "/usr/sbin/zoneadm"
+
+
+def mocker_zone_set(mocker, rc=0, out="", err="", zone_exists=False, zone_status=None):
+ """
+ Configure common mocker object for Solaris Zone tests
+ """
+ exists = mocker.patch.object(solaris_zone.Zone, "exists")
+ exists.return_value = zone_exists
+ get_bin_path = mocker.patch.object(AnsibleModule, "get_bin_path")
+ get_bin_path.return_value = ZONEADM
+ run_command = mocker.patch.object(AnsibleModule, "run_command")
+ run_command.return_value = (rc, out, err)
+ platform_release = mocker.patch.object(platform, "release")
+ platform_release.return_value = "5.11"
+ platform_system = mocker.patch.object(platform, "system")
+ platform_system.return_value = "SunOS"
+ if zone_status is not None:
+ status = mocker.patch.object(solaris_zone.Zone, "status")
+ status.return_value = zone_status
+
+
[email protected]
+def mocked_zone_create(mocker):
+ mocker_zone_set(mocker)
+
+
[email protected]
+def mocked_zone_delete(mocker):
+ mocker_zone_set(mocker, zone_exists=True, zone_status="running")
+
+
+def test_zone_create(mocked_zone_create, capfd):
+ """
+ test zone creation
+ """
+ set_module_args(
+ {
+ "name": "z1",
+ "state": "installed",
+ "path": "/zones/z1",
+ "_ansible_check_mode": False,
+ }
+ )
+ with pytest.raises(SystemExit):
+ solaris_zone.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get("failed")
+ assert results["changed"]
+
+
+def test_zone_delete(mocked_zone_delete, capfd):
+ """
+ test zone deletion
+ """
+ set_module_args(
+ {
+ "name": "z1",
+ "state": "absent",
+ "path": "/zones/z1",
+ "_ansible_check_mode": False,
+ }
+ )
+ with pytest.raises(SystemExit):
+ solaris_zone.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert not results.get("failed")
+ assert results["changed"]
+
+
+def test_zone_create_invalid_names(mocked_zone_create, capfd):
+ """
+ test zone creation with invalid names
+ """
+ # 1. Invalid character ('!').
+ # 2. Zone name > 64 characters.
+ # 3. Zone name beginning with non-alphanumeric character.
+ for invalid_name in ('foo!bar', 'z' * 65, '_zone'):
+ set_module_args(
+ {
+ "name": invalid_name,
+ "state": "installed",
+ "path": "/zones/" + invalid_name,
+ "_ansible_check_mode": False,
+ }
+ )
+ with pytest.raises(SystemExit):
+ solaris_zone.main()
+
+ out, err = capfd.readouterr()
+ results = json.loads(out)
+ assert results.get("failed")
| solaris_zone: zone configuration fails with python3
##### SUMMARY
TypeError when trying to create a Solaris Zone with the `solaris_zone` module; see the traceback under ACTUAL RESULTS.
##### ISSUE TYPE
- Bug Report
##### COMPONENT NAME
`solaris_zone`
##### ANSIBLE VERSION
```paste below
ansible 2.10.1
config file = /etc/ansible/ansible.cfg
configured module search path = ['/home/jbronn/.ansible/plugins/modules', '/usr/share/ansible/plugins/modules']
ansible python module location = /home/jbronn/.local/lib/python3.8/site-packages/ansible
executable location = /home/jbronn/.local/bin/ansible-playbook
python version = 3.8.5 (default, Jul 28 2020, 12:59:40) [GCC 9.3.0]
```
##### CONFIGURATION
N/A
##### OS / ENVIRONMENT
OmniOS CE r151034t (Illumos); target host Python is 3.7.5.
##### STEPS TO REPRODUCE
```yaml
- name: 'solaris zones'
hosts: all
become: true
tasks:
- solaris_zone:
name: z1
state: installed
path: /zones/z1
vars:
ansible_python_interpreter: '/usr/bin/python3'
```
##### EXPECTED RESULTS
The zone, `z1`, should be configured and installed.
##### ACTUAL RESULTS
Running the playbook produces this traceback:
```
Traceback (most recent call last):
File "/export/home/jbronn/.ansible/tmp/ansible-tmp-1602198687.9610054-1444903-128778670541170/AnsiballZ_solaris_zone.py", line 102, in <module>
_ansiballz_main()
File "/export/home/jbronn/.ansible/tmp/ansible-tmp-1602198687.9610054-1444903-128778670541170/AnsiballZ_solaris_zone.py", line 94, in _ansiballz_main
invoke_module(zipped_mod, temp_path, ANSIBALLZ_PARAMS)
File "/export/home/jbronn/.ansible/tmp/ansible-tmp-1602198687.9610054-1444903-128778670541170/AnsiballZ_solaris_zone.py", line 40, in invoke_module
runpy.run_module(mod_name='ansible_collections.community.general.plugins.modules.solaris_zone', init_globals=None, run_name='__main__', alter_sys=True)
File "/usr/lib/python3.7/runpy.py", line 205, in run_module
return _run_module_code(code, init_globals, run_name, mod_spec)
File "/usr/lib/python3.7/runpy.py", line 96, in _run_module_code
mod_name, mod_spec, pkg_name, script_name)
File "/usr/lib/python3.7/runpy.py", line 85, in _run_code
exec(code, run_globals)
File "/tmp/ansible_solaris_zone_payload_p87jcqod/ansible_solaris_zone_payload.zip/ansible_collections/community/general/plugins/modules/solaris_zone.py", line 486, in <module>
File "/tmp/ansible_solaris_zone_payload_p87jcqod/ansible_solaris_zone_payload.zip/ansible_collections/community/general/plugins/modules/solaris_zone.py", line 468, in main
File "/tmp/ansible_solaris_zone_payload_p87jcqod/ansible_solaris_zone_payload.zip/ansible_collections/community/general/plugins/modules/solaris_zone.py", line 395, in state_present
File "/tmp/ansible_solaris_zone_payload_p87jcqod/ansible_solaris_zone_payload.zip/ansible_collections/community/general/plugins/modules/solaris_zone.py", line 202, in configure
File "/usr/lib/python3.7/tempfile.py", line 481, in func_wrapper
return func(*args, **kwargs)
TypeError: a bytes-like object is required, not 'str'
```
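As a note on the report above: `tempfile.NamedTemporaryFile(delete=False)` hands back a binary-mode file object (`w+b`) on Python 3, so writing `str` data raises exactly this TypeError; the diff above fixes it by requesting `mode='wt'`. A minimal sketch of the failure and the fix (the zonecfg line written here is only an illustrative placeholder, not taken from the module):

```python
import os
import tempfile

# Default handle is binary ('w+b'), so a str payload is rejected on Python 3.
t = tempfile.NamedTemporaryFile(delete=False)
try:
    t.write('set zonepath=/zones/z1\n')  # illustrative zonecfg line (placeholder)
except TypeError as exc:
    print('binary-mode handle rejected str:', exc)  # a bytes-like object is required, not 'str'
finally:
    t.close()
    os.unlink(t.name)

# Fix applied in the PR: ask for a text-mode handle so str writes succeed.
t = tempfile.NamedTemporaryFile(delete=False, mode='wt')
t.write('set zonepath=/zones/z1\n')
t.close()
os.unlink(t.name)
```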
|
pymedusa__Medusa-6208 | [
{
"content": "# coding=utf-8\n\n\"\"\"Provider code for Binsearch provider.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport re\nfrom builtins import zip\nfrom os.path import join\n\nfrom medusa import tv\nfrom medusa.bs4_parser import BS4Parser\nfrom medusa.helper.common import convert_size, sanitize_filename\nfrom medusa.helpers import download_file\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.providers.nzb.nzb_provider import NZBProvider\n\nfrom requests.compat import urljoin\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass BinSearchProvider(NZBProvider):\n \"\"\"BinSearch Newznab provider.\"\"\"\n\n size_regex = re.compile(r'size: (\\d+\\.\\d+\\xa0\\w{2}), parts', re.I)\n title_regex = re.compile(r'\\\"([^\\\"]+)\"', re.I)\n title_reqex_clean = re.compile(r'^[ \\d_]+ (.+)')\n title_regex_rss = re.compile(r'- \\\"([^\\\"]+)\"', re.I)\n nzb_check_segment = re.compile(r'<segment bytes=\"[\\d]+\"')\n\n def __init__(self):\n \"\"\"Initialize the class.\"\"\"\n super(BinSearchProvider, self).__init__('BinSearch')\n\n # Credentials\n self.public = True\n\n # URLs\n self.url = 'https://www.binsearch.info'\n self.urls = {\n 'search': urljoin(self.url, 'index.php'),\n 'rss': urljoin(self.url, 'browse.php'),\n }\n\n # Proper Strings\n self.proper_strings = ['PROPER', 'REPACK', 'REAL', 'RERIP']\n\n # Miscellaneous Options\n\n # Cache\n self.cache = tv.Cache(self, min_time=10)\n\n def search(self, search_strings, **kwargs):\n \"\"\"\n Search a provider and parse the results.\n\n :param search_strings: A dict with mode (key) and the search value (value)\n :returns: A list of search results (structure)\n \"\"\"\n results = []\n search_params = {\n 'adv_age': '',\n 'xminsize': 20,\n 'max': 250,\n }\n groups = [1, 2]\n\n for mode in search_strings:\n log.debug('Search mode: {0}', mode)\n # https://www.binsearch.info/browse.php?bg=alt.binaries.teevee&server=2\n for search_string in search_strings[mode]:\n search_params['q'] = search_string\n for group in groups:\n # Try both 'search in the most popular groups' & 'search in the other groups' modes\n search_params['server'] = group\n if mode != 'RSS':\n log.debug('Search string: {search}', {'search': search_string})\n search_url = self.urls['search']\n else:\n search_params = {\n 'bg': 'alt.binaries.teevee',\n 'server': 2,\n 'max': 50,\n }\n search_url = self.urls['rss']\n\n response = self.session.get(search_url, params=search_params)\n if not response or not response.text:\n log.debug('No data returned from provider')\n continue\n\n results += self.parse(response.text, mode)\n\n return results\n\n def parse(self, data, mode):\n \"\"\"\n Parse search results for items.\n\n :param data: The raw response from a search\n :param mode: The current mode used to search, e.g. 
RSS\n\n :return: A list of items found\n \"\"\"\n def process_column_header(td):\n return td.get_text(strip=True).lower()\n\n items = []\n\n with BS4Parser(data, 'html5lib') as html:\n\n # We need to store the post url, to be used with every result later on.\n post_url = html.find('form', {'method': 'post'})['action']\n\n table = html.find('table', class_='xMenuT')\n rows = table('tr') if table else []\n row_offset = 1\n if not rows or not len(rows) - row_offset:\n log.debug('Data returned from provider does not contain any torrents')\n return items\n\n headers = rows[0]('th')\n # 0, 1, subject, poster, group, age\n labels = [process_column_header(header) or idx\n for idx, header in enumerate(headers)]\n\n # Skip column headers\n rows = rows[row_offset:]\n for row in rows:\n try:\n col = dict(list(zip(labels, row('td'))))\n nzb_id_input = col[0 if mode == 'RSS' else 1].find('input')\n if not nzb_id_input:\n continue\n nzb_id = nzb_id_input['name']\n # Try and get the the article subject from the weird binsearch format\n title = self.clean_title(col['subject'].text, mode)\n\n except AttributeError:\n log.debug('Parsing rows, that may not always have useful info. Skipping to next.')\n continue\n if not all([title, nzb_id]):\n continue\n\n # Obtain the size from the 'description'\n size_field = BinSearchProvider.size_regex.search(col['subject'].text)\n if size_field:\n size_field = size_field.group(1)\n size = convert_size(size_field, sep='\\xa0') or -1\n size = int(size)\n\n download_url = urljoin(self.url, '{post_url}|nzb_id={nzb_id}'.format(post_url=post_url, nzb_id=nzb_id))\n\n # For future use\n # detail_url = 'https://www.binsearch.info/?q={0}'.format(title)\n human_time = True\n date = col['age' if mode != 'RSS' else 'date'].get_text(strip=True).replace('-', ' ')\n if mode == 'RSS':\n human_time = False\n pubdate_raw = date\n pubdate = self.parse_pubdate(pubdate_raw, human_time=human_time)\n\n item = {\n 'title': title,\n 'link': download_url,\n 'size': size,\n 'pubdate': pubdate,\n }\n if mode != 'RSS':\n log.debug('Found result: {0}', title)\n\n items.append(item)\n\n return items\n\n @staticmethod\n def clean_title(title, mode):\n \"\"\"\n Clean title field, using a series of regex.\n\n RSS search requires different cleaning then the other searches.\n When adding to this function, make sure you update the tests.\n \"\"\"\n try:\n if mode == 'RSS':\n title = BinSearchProvider.title_regex_rss.search(title).group(1)\n else:\n title = BinSearchProvider.title_regex.search(title).group(1)\n if BinSearchProvider.title_reqex_clean.search(title):\n title = BinSearchProvider.title_reqex_clean.search(title).group(1)\n for extension in ('.nfo', '.par2', '.rar', '.zip', '.nzb', '.part'):\n # Strip extensions that aren't part of the file name\n if title.endswith(extension):\n title = title[:len(title) - len(extension)]\n return title\n except AttributeError:\n return None\n\n def download_result(self, result):\n \"\"\"\n Download result from provider.\n\n This is used when a blackhole is used for sending the nzb file to the nzb client.\n For now the url and the post data is stored as one string in the db, using a pipe (|) to separate them.\n\n :param result: A SearchResult object.\n :return: The result of the nzb download (True/False).\n \"\"\"\n if not self.login():\n return False\n\n result_name = sanitize_filename(result.name)\n filename = join(self._get_storage_dir(), result_name + '.' 
+ self.provider_type)\n\n if result.url.startswith('http'):\n self.session.headers.update({\n 'Referer': '/'.join(result.url.split('/')[:3]) + '/'\n })\n\n log.info('Downloading {result} from {provider} at {url}',\n {'result': result.name, 'provider': self.name, 'url': result.url})\n\n verify = False if self.public else None\n\n url, data = result.url.split('|')\n\n data = {\n data.split('=')[1]: 'on',\n 'action': 'nzb',\n }\n\n if download_file(url, filename, method='POST', data=data, session=self.session,\n headers=self.headers, verify=verify):\n\n if self._verify_download(filename):\n log.info('Saved {result} to {location}',\n {'result': result.name, 'location': filename})\n return True\n\n return False\n\n def download_nzb_for_post(self, result):\n \"\"\"\n Download the nzb content, prior to sending it to the nzb download client.\n\n :param result: Nzb SearchResult object.\n :return: The content of the nzb file if successful else None.\n \"\"\"\n if not self.login():\n return False\n\n # For now to separate the url and the post data, where splitting it with a pipe.\n url, data = result.url.split('|')\n\n data = {\n data.split('=')[1]: 'on',\n 'action': 'nzb',\n }\n\n log.info('Downloading {result} from {provider} at {url} and data {data}',\n {'result': result.name, 'provider': self.name, 'url': result.url, 'data': data})\n\n verify = False if self.public else None\n\n response = self.session.post(url, data=data, headers=self.session.headers,\n verify=verify, hooks={}, allow_redirects=True)\n if not response or not response.content:\n log.warning('Failed to download the NZB from BinSearch')\n return None\n\n # Validate that the result has the content of a valid nzb.\n if not BinSearchProvider.nzb_check_segment.search(response.content):\n log.warning('Result returned from BinSearch was not a valid NZB')\n return None\n\n return response.content\n\n def _get_size(self, item):\n \"\"\"\n Get result size.\n\n Overwrite this, as the default _get_size() from nzb_provider isn't working for us.\n :param item:\n :return: size in bytes or -1\n \"\"\"\n return item.get('size', -1)\n\n\nprovider = BinSearchProvider()\n",
"path": "medusa/providers/nzb/binsearch.py"
}
] | [
{
"content": "# coding=utf-8\n\n\"\"\"Provider code for Binsearch provider.\"\"\"\n\nfrom __future__ import unicode_literals\n\nimport logging\nimport re\nfrom builtins import zip\nfrom os.path import join\n\nfrom medusa import tv\nfrom medusa.bs4_parser import BS4Parser\nfrom medusa.helper.common import convert_size, sanitize_filename\nfrom medusa.helpers import download_file\nfrom medusa.logger.adapters.style import BraceAdapter\nfrom medusa.providers.nzb.nzb_provider import NZBProvider\n\nfrom requests.compat import urljoin\n\nlog = BraceAdapter(logging.getLogger(__name__))\nlog.logger.addHandler(logging.NullHandler())\n\n\nclass BinSearchProvider(NZBProvider):\n \"\"\"BinSearch Newznab provider.\"\"\"\n\n size_regex = re.compile(r'size: (\\d+\\.\\d+\\xa0\\w{2}), parts', re.I)\n title_regex = re.compile(r'\\\"([^\\\"]+)\"', re.I)\n title_reqex_clean = re.compile(r'^[ \\d_]+ (.+)')\n title_regex_rss = re.compile(r'- \\\"([^\\\"]+)\"', re.I)\n nzb_check_segment = re.compile(r'<segment bytes=\"[\\d]+\"')\n\n def __init__(self):\n \"\"\"Initialize the class.\"\"\"\n super(BinSearchProvider, self).__init__('BinSearch')\n\n # Credentials\n self.public = True\n\n # URLs\n self.url = 'https://www.binsearch.info'\n self.urls = {\n 'search': urljoin(self.url, 'index.php'),\n 'rss': urljoin(self.url, 'browse.php'),\n }\n\n # Proper Strings\n self.proper_strings = ['PROPER', 'REPACK', 'REAL', 'RERIP']\n\n # Miscellaneous Options\n\n # Cache\n self.cache = tv.Cache(self, min_time=10)\n\n def search(self, search_strings, **kwargs):\n \"\"\"\n Search a provider and parse the results.\n\n :param search_strings: A dict with mode (key) and the search value (value)\n :returns: A list of search results (structure)\n \"\"\"\n results = []\n search_params = {\n 'adv_age': '',\n 'xminsize': 20,\n 'max': 250,\n }\n groups = [1, 2]\n\n for mode in search_strings:\n log.debug('Search mode: {0}', mode)\n # https://www.binsearch.info/browse.php?bg=alt.binaries.teevee&server=2\n for search_string in search_strings[mode]:\n search_params['q'] = search_string\n for group in groups:\n # Try both 'search in the most popular groups' & 'search in the other groups' modes\n search_params['server'] = group\n if mode != 'RSS':\n log.debug('Search string: {search}', {'search': search_string})\n search_url = self.urls['search']\n else:\n search_params = {\n 'bg': 'alt.binaries.teevee',\n 'server': 2,\n 'max': 50,\n }\n search_url = self.urls['rss']\n\n response = self.session.get(search_url, params=search_params)\n if not response or not response.text:\n log.debug('No data returned from provider')\n continue\n\n results += self.parse(response.text, mode)\n\n return results\n\n def parse(self, data, mode):\n \"\"\"\n Parse search results for items.\n\n :param data: The raw response from a search\n :param mode: The current mode used to search, e.g. 
RSS\n\n :return: A list of items found\n \"\"\"\n def process_column_header(td):\n return td.get_text(strip=True).lower()\n\n items = []\n\n with BS4Parser(data, 'html5lib') as html:\n\n # We need to store the post url, to be used with every result later on.\n post_url = html.find('form', {'method': 'post'})['action']\n\n table = html.find('table', class_='xMenuT')\n rows = table('tr') if table else []\n row_offset = 1\n if not rows or not len(rows) - row_offset:\n log.debug('Data returned from provider does not contain any torrents')\n return items\n\n headers = rows[0]('th')\n # 0, 1, subject, poster, group, age\n labels = [process_column_header(header) or idx\n for idx, header in enumerate(headers)]\n\n # Skip column headers\n rows = rows[row_offset:]\n for row in rows:\n try:\n col = dict(list(zip(labels, row('td'))))\n nzb_id_input = col[0 if mode == 'RSS' else 1].find('input')\n if not nzb_id_input:\n continue\n nzb_id = nzb_id_input['name']\n # Try and get the the article subject from the weird binsearch format\n title = self.clean_title(col['subject'].text, mode)\n\n except AttributeError:\n log.debug('Parsing rows, that may not always have useful info. Skipping to next.')\n continue\n if not all([title, nzb_id]):\n continue\n\n # Obtain the size from the 'description'\n size_field = BinSearchProvider.size_regex.search(col['subject'].text)\n if size_field:\n size_field = size_field.group(1)\n size = convert_size(size_field, sep='\\xa0') or -1\n size = int(size)\n\n download_url = urljoin(self.url, '{post_url}|nzb_id={nzb_id}'.format(post_url=post_url, nzb_id=nzb_id))\n\n # For future use\n # detail_url = 'https://www.binsearch.info/?q={0}'.format(title)\n human_time = True\n date = col['age' if mode != 'RSS' else 'date'].get_text(strip=True).replace('-', ' ')\n if mode == 'RSS':\n human_time = False\n pubdate_raw = date\n pubdate = self.parse_pubdate(pubdate_raw, human_time=human_time)\n\n item = {\n 'title': title,\n 'link': download_url,\n 'size': size,\n 'pubdate': pubdate,\n }\n if mode != 'RSS':\n log.debug('Found result: {0}', title)\n\n items.append(item)\n\n return items\n\n @staticmethod\n def clean_title(title, mode):\n \"\"\"\n Clean title field, using a series of regex.\n\n RSS search requires different cleaning then the other searches.\n When adding to this function, make sure you update the tests.\n \"\"\"\n try:\n if mode == 'RSS':\n title = BinSearchProvider.title_regex_rss.search(title).group(1)\n else:\n title = BinSearchProvider.title_regex.search(title).group(1)\n if BinSearchProvider.title_reqex_clean.search(title):\n title = BinSearchProvider.title_reqex_clean.search(title).group(1)\n for extension in ('.nfo', '.par2', '.rar', '.zip', '.nzb', '.part'):\n # Strip extensions that aren't part of the file name\n if title.endswith(extension):\n title = title[:len(title) - len(extension)]\n return title\n except AttributeError:\n return None\n\n def download_result(self, result):\n \"\"\"\n Download result from provider.\n\n This is used when a blackhole is used for sending the nzb file to the nzb client.\n For now the url and the post data is stored as one string in the db, using a pipe (|) to separate them.\n\n :param result: A SearchResult object.\n :return: The result of the nzb download (True/False).\n \"\"\"\n if not self.login():\n return False\n\n result_name = sanitize_filename(result.name)\n filename = join(self._get_storage_dir(), result_name + '.' 
+ self.provider_type)\n\n if result.url.startswith('http'):\n self.session.headers.update({\n 'Referer': '/'.join(result.url.split('/')[:3]) + '/'\n })\n\n log.info('Downloading {result} from {provider} at {url}',\n {'result': result.name, 'provider': self.name, 'url': result.url})\n\n verify = False if self.public else None\n\n url, data = result.url.split('|')\n\n data = {\n data.split('=')[1]: 'on',\n 'action': 'nzb',\n }\n\n if download_file(url, filename, method='POST', data=data, session=self.session,\n headers=self.headers, verify=verify):\n\n if self._verify_download(filename):\n log.info('Saved {result} to {location}',\n {'result': result.name, 'location': filename})\n return True\n\n return False\n\n def download_nzb_for_post(self, result):\n \"\"\"\n Download the nzb content, prior to sending it to the nzb download client.\n\n :param result: Nzb SearchResult object.\n :return: The content of the nzb file if successful else None.\n \"\"\"\n if not self.login():\n return False\n\n # For now to separate the url and the post data, where splitting it with a pipe.\n url, data = result.url.split('|')\n\n data = {\n data.split('=')[1]: 'on',\n 'action': 'nzb',\n }\n\n log.info('Downloading {result} from {provider} at {url} and data {data}',\n {'result': result.name, 'provider': self.name, 'url': result.url, 'data': data})\n\n verify = False if self.public else None\n\n response = self.session.post(url, data=data, headers=self.session.headers,\n verify=verify, hooks={}, allow_redirects=True)\n if not response or not response.content:\n log.warning('Failed to download the NZB from BinSearch')\n return None\n\n # Validate that the result has the content of a valid nzb.\n if not BinSearchProvider.nzb_check_segment.search(response.text):\n log.warning('Result returned from BinSearch was not a valid NZB')\n return None\n\n return response.content\n\n def _get_size(self, item):\n \"\"\"\n Get result size.\n\n Overwrite this, as the default _get_size() from nzb_provider isn't working for us.\n :param item:\n :return: size in bytes or -1\n \"\"\"\n return item.get('size', -1)\n\n\nprovider = BinSearchProvider()\n",
"path": "medusa/providers/nzb/binsearch.py"
}
] | diff --git a/medusa/providers/nzb/binsearch.py b/medusa/providers/nzb/binsearch.py
index 6e01f31858..88dd22d147 100644
--- a/medusa/providers/nzb/binsearch.py
+++ b/medusa/providers/nzb/binsearch.py
@@ -272,7 +272,7 @@ def download_nzb_for_post(self, result):
return None
# Validate that the result has the content of a valid nzb.
- if not BinSearchProvider.nzb_check_segment.search(response.content):
+ if not BinSearchProvider.nzb_check_segment.search(response.text):
log.warning('Result returned from BinSearch was not a valid NZB')
return None
| [APP SUBMITTED]: TypeError: cannot use a string pattern on a bytes-like object
### INFO
**Python Version**: `3.7.2 (default, Jan 3 2019, 02:55:40) [GCC 8.2.0]`
**Operating System**: `Linux-4.9.35-v7+-armv7l-with-debian-buster-sid`
**Locale**: `UTF-8`
**Branch**: [develop](../tree/develop)
**Database**: `44.14`
**Commit**: pymedusa/Medusa@18bd87dded99e1ecfbeae7757e226ea5510e0f96
**Link to Log**: https://gist.github.com/4421b6f5dd716b24746e97ed3008b0c4
### ERROR
<pre>
2019-02-10 19:30:40 ERROR SNATCHQUEUE-SNATCH-526 :: [18bd87d] Snatch failed! For result: The.Office.(US).S03.1080p.WEB-DL.AAC2.0.AVC-TrollHD
Traceback (most recent call last):
File "/home/pi/Medusa/<a href="../blob/18bd87dded99e1ecfbeae7757e226ea5510e0f96/medusa/search/queue.py#L503">medusa/search/queue.py</a>", line 503, in run
self.success = snatch_episode(result)
File "/home/pi/Medusa/<a href="../blob/18bd87dded99e1ecfbeae7757e226ea5510e0f96/medusa/search/core.py#L132">medusa/search/core.py</a>", line 132, in snatch_episode
nzb_data = result.provider.download_nzb_for_post(result)
File "/home/pi/Medusa/<a href="../blob/18bd87dded99e1ecfbeae7757e226ea5510e0f96/medusa/providers/nzb/binsearch.py#L275">medusa/providers/nzb/binsearch.py</a>", line 275, in download_nzb_for_post
if not BinSearchProvider.nzb_check_segment.search(response.content):
TypeError: cannot use a string pattern on a bytes-like object
</pre>
---
_STAFF NOTIFIED_: @pymedusa/support @pymedusa/moderators
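For reference, a minimal sketch of the failure in the traceback above and of the fix in the diff: `nzb_check_segment` is a `str` pattern, `response.content` is `bytes`, and `response.text` is the decoded `str`. The byte string below is an invented stand-in for the real response body.

```python
import re

# str pattern, as compiled on the provider class.
nzb_check_segment = re.compile(r'<segment bytes="[\d]+"')

# Stand-in for response.content (Requests returns the raw body as bytes).
raw_body = b'<segment bytes="739811" number="1">...</segment>'

try:
    nzb_check_segment.search(raw_body)
except TypeError as exc:
    print(exc)  # cannot use a string pattern on a bytes-like object

# Fix taken in the diff: search the decoded text (what response.text provides).
assert nzb_check_segment.search(raw_body.decode('utf-8'))

# Alternative not used by the PR: compile a bytes pattern and keep matching bytes.
assert re.compile(br'<segment bytes="[\d]+"').search(raw_body)
```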
|
goauthentik__authentik-3769 | [
{
"content": "\"\"\"root settings for authentik\"\"\"\n\nimport importlib\nimport logging\nimport os\nfrom hashlib import sha512\nfrom urllib.parse import quote_plus\n\nimport structlog\nfrom celery.schedules import crontab\nfrom sentry_sdk import set_tag\n\nfrom authentik import ENV_GIT_HASH_KEY, __version__\nfrom authentik.lib.config import CONFIG\nfrom authentik.lib.logging import add_process_id\nfrom authentik.lib.sentry import sentry_init\nfrom authentik.lib.utils.reflection import get_env\nfrom authentik.stages.password import BACKEND_APP_PASSWORD, BACKEND_INBUILT, BACKEND_LDAP\n\nLOGGER = structlog.get_logger()\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nSTATIC_ROOT = BASE_DIR + \"/static\"\nSTATICFILES_DIRS = [BASE_DIR + \"/web\"]\nMEDIA_ROOT = BASE_DIR + \"/media\"\n\nDEBUG = CONFIG.y_bool(\"debug\")\nSECRET_KEY = CONFIG.y(\"secret_key\")\n\nINTERNAL_IPS = [\"127.0.0.1\"]\nALLOWED_HOSTS = [\"*\"]\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\nSECURE_CROSS_ORIGIN_OPENER_POLICY = None\nLOGIN_URL = \"authentik_flows:default-authentication\"\n\n# Custom user model\nAUTH_USER_MODEL = \"authentik_core.User\"\n\nCSRF_COOKIE_NAME = \"authentik_csrf\"\nCSRF_HEADER_NAME = \"HTTP_X_AUTHENTIK_CSRF\"\nLANGUAGE_COOKIE_NAME = \"authentik_language\"\nSESSION_COOKIE_NAME = \"authentik_session\"\nSESSION_COOKIE_DOMAIN = CONFIG.y(\"cookie_domain\", None)\n\nAUTHENTICATION_BACKENDS = [\n \"django.contrib.auth.backends.ModelBackend\",\n BACKEND_INBUILT,\n BACKEND_APP_PASSWORD,\n BACKEND_LDAP,\n \"guardian.backends.ObjectPermissionBackend\",\n]\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Application definition\nINSTALLED_APPS = [\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.humanize\",\n \"authentik.admin\",\n \"authentik.api\",\n \"authentik.crypto\",\n \"authentik.events\",\n \"authentik.flows\",\n \"authentik.lib\",\n \"authentik.outposts\",\n \"authentik.policies.dummy\",\n \"authentik.policies.event_matcher\",\n \"authentik.policies.expiry\",\n \"authentik.policies.expression\",\n \"authentik.policies.hibp\",\n \"authentik.policies.password\",\n \"authentik.policies.reputation\",\n \"authentik.policies\",\n \"authentik.providers.ldap\",\n \"authentik.providers.oauth2\",\n \"authentik.providers.proxy\",\n \"authentik.providers.saml\",\n \"authentik.recovery\",\n \"authentik.sources.ldap\",\n \"authentik.sources.oauth\",\n \"authentik.sources.plex\",\n \"authentik.sources.saml\",\n \"authentik.stages.authenticator_duo\",\n \"authentik.stages.authenticator_sms\",\n \"authentik.stages.authenticator_static\",\n \"authentik.stages.authenticator_totp\",\n \"authentik.stages.authenticator_validate\",\n \"authentik.stages.authenticator_webauthn\",\n \"authentik.stages.captcha\",\n \"authentik.stages.consent\",\n \"authentik.stages.deny\",\n \"authentik.stages.dummy\",\n \"authentik.stages.email\",\n \"authentik.stages.identification\",\n \"authentik.stages.invitation\",\n \"authentik.stages.password\",\n \"authentik.stages.prompt\",\n \"authentik.stages.user_delete\",\n \"authentik.stages.user_login\",\n \"authentik.stages.user_logout\",\n \"authentik.stages.user_write\",\n \"authentik.tenants\",\n \"authentik.blueprints\",\n \"rest_framework\",\n \"django_filters\",\n \"drf_spectacular\",\n \"guardian\",\n 
\"django_prometheus\",\n \"channels\",\n]\n\nGUARDIAN_MONKEY_PATCH = False\n\nSPECTACULAR_SETTINGS = {\n \"TITLE\": \"authentik\",\n \"DESCRIPTION\": \"Making authentication simple.\",\n \"VERSION\": __version__,\n \"COMPONENT_SPLIT_REQUEST\": True,\n \"SCHEMA_PATH_PREFIX\": \"/api/v([0-9]+(beta)?)\",\n \"SCHEMA_PATH_PREFIX_TRIM\": True,\n \"SERVERS\": [\n {\n \"url\": \"/api/v3/\",\n },\n ],\n \"CONTACT\": {\n \"email\": \"[email protected]\",\n },\n \"AUTHENTICATION_WHITELIST\": [\"authentik.api.authentication.TokenAuthentication\"],\n \"LICENSE\": {\n \"name\": \"GNU GPLv3\",\n \"url\": \"https://github.com/goauthentik/authentik/blob/main/LICENSE\",\n },\n \"ENUM_NAME_OVERRIDES\": {\n \"EventActions\": \"authentik.events.models.EventAction\",\n \"ChallengeChoices\": \"authentik.flows.challenge.ChallengeTypes\",\n \"FlowDesignationEnum\": \"authentik.flows.models.FlowDesignation\",\n \"PolicyEngineMode\": \"authentik.policies.models.PolicyEngineMode\",\n \"ProxyMode\": \"authentik.providers.proxy.models.ProxyMode\",\n \"PromptTypeEnum\": \"authentik.stages.prompt.models.FieldTypes\",\n \"LDAPAPIAccessMode\": \"authentik.providers.ldap.models.APIAccessMode\",\n },\n \"ENUM_ADD_EXPLICIT_BLANK_NULL_CHOICE\": False,\n \"POSTPROCESSING_HOOKS\": [\n \"authentik.api.schema.postprocess_schema_responses\",\n \"drf_spectacular.hooks.postprocess_schema_enums\",\n ],\n}\n\nREST_FRAMEWORK = {\n \"DEFAULT_PAGINATION_CLASS\": \"authentik.api.pagination.Pagination\",\n \"PAGE_SIZE\": 100,\n \"DEFAULT_FILTER_BACKENDS\": [\n \"rest_framework_guardian.filters.ObjectPermissionsFilter\",\n \"django_filters.rest_framework.DjangoFilterBackend\",\n \"rest_framework.filters.OrderingFilter\",\n \"rest_framework.filters.SearchFilter\",\n ],\n \"DEFAULT_PARSER_CLASSES\": [\n \"rest_framework.parsers.JSONParser\",\n ],\n \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.DjangoObjectPermissions\",),\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"authentik.api.authentication.TokenAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n ),\n \"DEFAULT_RENDERER_CLASSES\": [\n \"rest_framework.renderers.JSONRenderer\",\n ],\n \"DEFAULT_SCHEMA_CLASS\": \"drf_spectacular.openapi.AutoSchema\",\n \"TEST_REQUEST_DEFAULT_FORMAT\": \"json\",\n}\n\nREDIS_PROTOCOL_PREFIX = \"redis://\"\nREDIS_CELERY_TLS_REQUIREMENTS = \"\"\nif CONFIG.y_bool(\"redis.tls\", False):\n REDIS_PROTOCOL_PREFIX = \"rediss://\"\n REDIS_CELERY_TLS_REQUIREMENTS = f\"?ssl_cert_reqs={CONFIG.y('redis.tls_reqs')}\"\n_redis_url = (\n f\"{REDIS_PROTOCOL_PREFIX}:\"\n f\"{quote_plus(CONFIG.y('redis.password'))}@{quote_plus(CONFIG.y('redis.host'))}:\"\n f\"{int(CONFIG.y('redis.port'))}\"\n)\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": f\"{_redis_url}/{CONFIG.y('redis.cache_db')}\",\n \"TIMEOUT\": int(CONFIG.y(\"redis.cache_timeout\", 300)),\n \"OPTIONS\": {\"CLIENT_CLASS\": \"django_redis.client.DefaultClient\"},\n }\n}\nDJANGO_REDIS_SCAN_ITERSIZE = 1000\nDJANGO_REDIS_IGNORE_EXCEPTIONS = True\nDJANGO_REDIS_LOG_IGNORED_EXCEPTIONS = True\nSESSION_ENGINE = \"django.contrib.sessions.backends.cache\"\nSESSION_SERIALIZER = \"django.contrib.sessions.serializers.PickleSerializer\"\nSESSION_CACHE_ALIAS = \"default\"\n# Configured via custom SessionMiddleware\n# SESSION_COOKIE_SAMESITE = \"None\"\n# SESSION_COOKIE_SECURE = True\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\n\nMESSAGE_STORAGE = \"authentik.root.messages.storage.ChannelsStorage\"\n\nMIDDLEWARE = [\n 
\"authentik.root.middleware.LoggingMiddleware\",\n \"django_prometheus.middleware.PrometheusBeforeMiddleware\",\n \"authentik.root.middleware.SessionMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"authentik.core.middleware.RequestIDMiddleware\",\n \"authentik.tenants.middleware.TenantMiddleware\",\n \"authentik.events.middleware.AuditMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"authentik.core.middleware.ImpersonateMiddleware\",\n \"django_prometheus.middleware.PrometheusAfterMiddleware\",\n]\n\nROOT_URLCONF = \"authentik.root.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [CONFIG.y(\"email.template_dir\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"authentik.tenants.utils.context_processor\",\n ],\n },\n },\n]\n\nASGI_APPLICATION = \"authentik.root.asgi.application\"\n\nCHANNEL_LAYERS = {\n \"default\": {\n \"BACKEND\": \"channels_redis.core.RedisChannelLayer\",\n \"CONFIG\": {\n \"hosts\": [f\"{_redis_url}/{CONFIG.y('redis.ws_db')}\"],\n },\n },\n}\n\n\n# Database\n# https://docs.djangoproject.com/en/2.1/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django_prometheus.db.backends.postgresql\",\n \"HOST\": CONFIG.y(\"postgresql.host\"),\n \"NAME\": CONFIG.y(\"postgresql.name\"),\n \"USER\": CONFIG.y(\"postgresql.user\"),\n \"PASSWORD\": CONFIG.y(\"postgresql.password\"),\n \"PORT\": int(CONFIG.y(\"postgresql.port\")),\n }\n}\n\n# Email\nEMAIL_HOST = CONFIG.y(\"email.host\")\nEMAIL_PORT = int(CONFIG.y(\"email.port\"))\nEMAIL_HOST_USER = CONFIG.y(\"email.username\")\nEMAIL_HOST_PASSWORD = CONFIG.y(\"email.password\")\nEMAIL_USE_TLS = CONFIG.y_bool(\"email.use_tls\", False)\nEMAIL_USE_SSL = CONFIG.y_bool(\"email.use_ssl\", False)\nEMAIL_TIMEOUT = int(CONFIG.y(\"email.timeout\"))\nDEFAULT_FROM_EMAIL = CONFIG.y(\"email.from\")\nSERVER_EMAIL = DEFAULT_FROM_EMAIL\nEMAIL_SUBJECT_PREFIX = \"[authentik] \"\n\n# Password validation\n# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\"},\n {\"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\"},\n {\"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\"},\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.1/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_TZ = True\n\nLOCALE_PATHS = [\"./locale\"]\n\n# Celery settings\n# Add a 10 minute timeout to all Celery tasks.\nCELERY_TASK_SOFT_TIME_LIMIT = 600\nCELERY_WORKER_MAX_TASKS_PER_CHILD = 50\nCELERY_WORKER_CONCURRENCY = 2\nCELERY_BEAT_SCHEDULE = {\n \"clean_expired_models\": {\n \"task\": \"authentik.core.tasks.clean_expired_models\",\n \"schedule\": crontab(minute=\"2-59/5\"),\n \"options\": {\"queue\": \"authentik_scheduled\"},\n },\n \"user_cleanup\": {\n \"task\": 
\"authentik.core.tasks.clean_temporary_users\",\n \"schedule\": crontab(minute=\"9-59/5\"),\n \"options\": {\"queue\": \"authentik_scheduled\"},\n },\n}\nCELERY_TASK_CREATE_MISSING_QUEUES = True\nCELERY_TASK_DEFAULT_QUEUE = \"authentik\"\nCELERY_BROKER_URL = (\n f\"{_redis_url}/{CONFIG.y('redis.message_queue_db')}{REDIS_CELERY_TLS_REQUIREMENTS}\"\n)\nCELERY_RESULT_BACKEND = (\n f\"{_redis_url}/{CONFIG.y('redis.message_queue_db')}{REDIS_CELERY_TLS_REQUIREMENTS}\"\n)\n\n# Sentry integration\nenv = get_env()\n_ERROR_REPORTING = CONFIG.y_bool(\"error_reporting.enabled\", False)\nif _ERROR_REPORTING:\n sentry_env = CONFIG.y(\"error_reporting.environment\", \"customer\")\n sentry_init()\n set_tag(\"authentik.uuid\", sha512(str(SECRET_KEY).encode(\"ascii\")).hexdigest()[:16])\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.1/howto/static-files/\n\nSTATIC_URL = \"/static/\"\nMEDIA_URL = \"/media/\"\n\nTEST = False\nTEST_RUNNER = \"authentik.root.test_runner.PytestTestRunner\"\n# We can't check TEST here as its set later by the test runner\nLOG_LEVEL = CONFIG.y(\"log_level\").upper() if \"TF_BUILD\" not in os.environ else \"DEBUG\"\n# We could add a custom level to stdlib logging and structlog, but it's not easy or clean\n# https://stackoverflow.com/questions/54505487/custom-log-level-not-working-with-structlog\n# Additionally, the entire code uses debug as highest level so that would have to be re-written too\nif LOG_LEVEL == \"TRACE\":\n LOG_LEVEL = \"DEBUG\"\n\nstructlog.configure_once(\n processors=[\n structlog.stdlib.add_log_level,\n structlog.stdlib.add_logger_name,\n structlog.contextvars.merge_contextvars,\n add_process_id,\n structlog.stdlib.PositionalArgumentsFormatter(),\n structlog.processors.TimeStamper(fmt=\"iso\", utc=False),\n structlog.processors.StackInfoRenderer(),\n structlog.processors.dict_tracebacks,\n structlog.stdlib.ProcessorFormatter.wrap_for_formatter,\n ],\n logger_factory=structlog.stdlib.LoggerFactory(),\n wrapper_class=structlog.make_filtering_bound_logger(\n getattr(logging, LOG_LEVEL, logging.WARNING)\n ),\n cache_logger_on_first_use=True,\n)\n\nLOG_PRE_CHAIN = [\n # Add the log level and a timestamp to the event_dict if the log entry\n # is not from structlog.\n structlog.stdlib.add_log_level,\n structlog.stdlib.add_logger_name,\n structlog.processors.TimeStamper(),\n structlog.processors.StackInfoRenderer(),\n]\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"json\": {\n \"()\": structlog.stdlib.ProcessorFormatter,\n \"processor\": structlog.processors.JSONRenderer(sort_keys=True),\n \"foreign_pre_chain\": LOG_PRE_CHAIN,\n },\n \"console\": {\n \"()\": structlog.stdlib.ProcessorFormatter,\n \"processor\": structlog.dev.ConsoleRenderer(colors=DEBUG),\n \"foreign_pre_chain\": LOG_PRE_CHAIN,\n },\n },\n \"handlers\": {\n \"console\": {\n \"level\": \"DEBUG\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"console\" if DEBUG else \"json\",\n },\n },\n \"loggers\": {},\n}\n\n_LOGGING_HANDLER_MAP = {\n \"\": LOG_LEVEL,\n \"authentik\": LOG_LEVEL,\n \"django\": \"WARNING\",\n \"celery\": \"WARNING\",\n \"selenium\": \"WARNING\",\n \"docker\": \"WARNING\",\n \"urllib3\": \"WARNING\",\n \"websockets\": \"WARNING\",\n \"daphne\": \"WARNING\",\n \"kubernetes\": \"INFO\",\n \"asyncio\": \"WARNING\",\n \"redis\": \"WARNING\",\n \"silk\": \"INFO\",\n}\nfor handler_name, level in _LOGGING_HANDLER_MAP.items():\n # pyright: reportGeneralTypeIssues=false\n 
LOGGING[\"loggers\"][handler_name] = {\n \"handlers\": [\"console\"],\n \"level\": level,\n \"propagate\": False,\n }\n\n\n_DISALLOWED_ITEMS = [\n \"INSTALLED_APPS\",\n \"MIDDLEWARE\",\n \"AUTHENTICATION_BACKENDS\",\n \"CELERY_BEAT_SCHEDULE\",\n]\n# Load subapps's INSTALLED_APPS\nfor _app in INSTALLED_APPS:\n if _app.startswith(\"authentik\"):\n if \"apps\" in _app:\n _app = \".\".join(_app.split(\".\")[:-2])\n try:\n app_settings = importlib.import_module(f\"{_app}.settings\")\n INSTALLED_APPS.extend(getattr(app_settings, \"INSTALLED_APPS\", []))\n MIDDLEWARE.extend(getattr(app_settings, \"MIDDLEWARE\", []))\n AUTHENTICATION_BACKENDS.extend(getattr(app_settings, \"AUTHENTICATION_BACKENDS\", []))\n CELERY_BEAT_SCHEDULE.update(getattr(app_settings, \"CELERY_BEAT_SCHEDULE\", {}))\n for _attr in dir(app_settings):\n if not _attr.startswith(\"__\") and _attr not in _DISALLOWED_ITEMS:\n globals()[_attr] = getattr(app_settings, _attr)\n except ImportError:\n pass\n\nif DEBUG:\n CELERY_TASK_ALWAYS_EAGER = True\n os.environ[ENV_GIT_HASH_KEY] = \"dev\"\n INSTALLED_APPS.append(\"silk\")\n SILKY_PYTHON_PROFILER = True\n MIDDLEWARE = [\"silk.middleware.SilkyMiddleware\"] + MIDDLEWARE\n\nINSTALLED_APPS.append(\"authentik.core\")\n\nCONFIG.log(\"info\", \"Booting authentik\", version=__version__)\n",
"path": "authentik/root/settings.py"
}
] | [
{
"content": "\"\"\"root settings for authentik\"\"\"\n\nimport importlib\nimport logging\nimport os\nfrom hashlib import sha512\nfrom urllib.parse import quote_plus\n\nimport structlog\nfrom celery.schedules import crontab\nfrom sentry_sdk import set_tag\n\nfrom authentik import ENV_GIT_HASH_KEY, __version__\nfrom authentik.lib.config import CONFIG\nfrom authentik.lib.logging import add_process_id\nfrom authentik.lib.sentry import sentry_init\nfrom authentik.lib.utils.reflection import get_env\nfrom authentik.stages.password import BACKEND_APP_PASSWORD, BACKEND_INBUILT, BACKEND_LDAP\n\nLOGGER = structlog.get_logger()\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nSTATIC_ROOT = BASE_DIR + \"/static\"\nSTATICFILES_DIRS = [BASE_DIR + \"/web\"]\nMEDIA_ROOT = BASE_DIR + \"/media\"\n\nDEBUG = CONFIG.y_bool(\"debug\")\nSECRET_KEY = CONFIG.y(\"secret_key\")\n\nINTERNAL_IPS = [\"127.0.0.1\"]\nALLOWED_HOSTS = [\"*\"]\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\nSECURE_CROSS_ORIGIN_OPENER_POLICY = None\nLOGIN_URL = \"authentik_flows:default-authentication\"\n\n# Custom user model\nAUTH_USER_MODEL = \"authentik_core.User\"\n\nCSRF_COOKIE_NAME = \"authentik_csrf\"\nCSRF_HEADER_NAME = \"HTTP_X_AUTHENTIK_CSRF\"\nLANGUAGE_COOKIE_NAME = \"authentik_language\"\nSESSION_COOKIE_NAME = \"authentik_session\"\nSESSION_COOKIE_DOMAIN = CONFIG.y(\"cookie_domain\", None)\n\nAUTHENTICATION_BACKENDS = [\n \"django.contrib.auth.backends.ModelBackend\",\n BACKEND_INBUILT,\n BACKEND_APP_PASSWORD,\n BACKEND_LDAP,\n \"guardian.backends.ObjectPermissionBackend\",\n]\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Application definition\nINSTALLED_APPS = [\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django.contrib.humanize\",\n \"authentik.admin\",\n \"authentik.api\",\n \"authentik.crypto\",\n \"authentik.events\",\n \"authentik.flows\",\n \"authentik.lib\",\n \"authentik.outposts\",\n \"authentik.policies.dummy\",\n \"authentik.policies.event_matcher\",\n \"authentik.policies.expiry\",\n \"authentik.policies.expression\",\n \"authentik.policies.hibp\",\n \"authentik.policies.password\",\n \"authentik.policies.reputation\",\n \"authentik.policies\",\n \"authentik.providers.ldap\",\n \"authentik.providers.oauth2\",\n \"authentik.providers.proxy\",\n \"authentik.providers.saml\",\n \"authentik.recovery\",\n \"authentik.sources.ldap\",\n \"authentik.sources.oauth\",\n \"authentik.sources.plex\",\n \"authentik.sources.saml\",\n \"authentik.stages.authenticator_duo\",\n \"authentik.stages.authenticator_sms\",\n \"authentik.stages.authenticator_static\",\n \"authentik.stages.authenticator_totp\",\n \"authentik.stages.authenticator_validate\",\n \"authentik.stages.authenticator_webauthn\",\n \"authentik.stages.captcha\",\n \"authentik.stages.consent\",\n \"authentik.stages.deny\",\n \"authentik.stages.dummy\",\n \"authentik.stages.email\",\n \"authentik.stages.identification\",\n \"authentik.stages.invitation\",\n \"authentik.stages.password\",\n \"authentik.stages.prompt\",\n \"authentik.stages.user_delete\",\n \"authentik.stages.user_login\",\n \"authentik.stages.user_logout\",\n \"authentik.stages.user_write\",\n \"authentik.tenants\",\n \"authentik.blueprints\",\n \"rest_framework\",\n \"django_filters\",\n \"drf_spectacular\",\n \"guardian\",\n 
\"django_prometheus\",\n \"channels\",\n]\n\nGUARDIAN_MONKEY_PATCH = False\n\nSPECTACULAR_SETTINGS = {\n \"TITLE\": \"authentik\",\n \"DESCRIPTION\": \"Making authentication simple.\",\n \"VERSION\": __version__,\n \"COMPONENT_SPLIT_REQUEST\": True,\n \"SCHEMA_PATH_PREFIX\": \"/api/v([0-9]+(beta)?)\",\n \"SCHEMA_PATH_PREFIX_TRIM\": True,\n \"SERVERS\": [\n {\n \"url\": \"/api/v3/\",\n },\n ],\n \"CONTACT\": {\n \"email\": \"[email protected]\",\n },\n \"AUTHENTICATION_WHITELIST\": [\"authentik.api.authentication.TokenAuthentication\"],\n \"LICENSE\": {\n \"name\": \"GNU GPLv3\",\n \"url\": \"https://github.com/goauthentik/authentik/blob/main/LICENSE\",\n },\n \"ENUM_NAME_OVERRIDES\": {\n \"EventActions\": \"authentik.events.models.EventAction\",\n \"ChallengeChoices\": \"authentik.flows.challenge.ChallengeTypes\",\n \"FlowDesignationEnum\": \"authentik.flows.models.FlowDesignation\",\n \"PolicyEngineMode\": \"authentik.policies.models.PolicyEngineMode\",\n \"ProxyMode\": \"authentik.providers.proxy.models.ProxyMode\",\n \"PromptTypeEnum\": \"authentik.stages.prompt.models.FieldTypes\",\n \"LDAPAPIAccessMode\": \"authentik.providers.ldap.models.APIAccessMode\",\n },\n \"ENUM_ADD_EXPLICIT_BLANK_NULL_CHOICE\": False,\n \"POSTPROCESSING_HOOKS\": [\n \"authentik.api.schema.postprocess_schema_responses\",\n \"drf_spectacular.hooks.postprocess_schema_enums\",\n ],\n}\n\nREST_FRAMEWORK = {\n \"DEFAULT_PAGINATION_CLASS\": \"authentik.api.pagination.Pagination\",\n \"PAGE_SIZE\": 100,\n \"DEFAULT_FILTER_BACKENDS\": [\n \"rest_framework_guardian.filters.ObjectPermissionsFilter\",\n \"django_filters.rest_framework.DjangoFilterBackend\",\n \"rest_framework.filters.OrderingFilter\",\n \"rest_framework.filters.SearchFilter\",\n ],\n \"DEFAULT_PARSER_CLASSES\": [\n \"rest_framework.parsers.JSONParser\",\n ],\n \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.DjangoObjectPermissions\",),\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"authentik.api.authentication.TokenAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n ),\n \"DEFAULT_RENDERER_CLASSES\": [\n \"rest_framework.renderers.JSONRenderer\",\n ],\n \"DEFAULT_SCHEMA_CLASS\": \"drf_spectacular.openapi.AutoSchema\",\n \"TEST_REQUEST_DEFAULT_FORMAT\": \"json\",\n}\n\nREDIS_PROTOCOL_PREFIX = \"redis://\"\nREDIS_CELERY_TLS_REQUIREMENTS = \"\"\nif CONFIG.y_bool(\"redis.tls\", False):\n REDIS_PROTOCOL_PREFIX = \"rediss://\"\n REDIS_CELERY_TLS_REQUIREMENTS = f\"?ssl_cert_reqs={CONFIG.y('redis.tls_reqs')}\"\n_redis_url = (\n f\"{REDIS_PROTOCOL_PREFIX}:\"\n f\"{quote_plus(CONFIG.y('redis.password'))}@{quote_plus(CONFIG.y('redis.host'))}:\"\n f\"{int(CONFIG.y('redis.port'))}\"\n)\n\nCACHES = {\n \"default\": {\n \"BACKEND\": \"django_redis.cache.RedisCache\",\n \"LOCATION\": f\"{_redis_url}/{CONFIG.y('redis.cache_db')}\",\n \"TIMEOUT\": int(CONFIG.y(\"redis.cache_timeout\", 300)),\n \"OPTIONS\": {\"CLIENT_CLASS\": \"django_redis.client.DefaultClient\"},\n }\n}\nDJANGO_REDIS_SCAN_ITERSIZE = 1000\nDJANGO_REDIS_IGNORE_EXCEPTIONS = True\nDJANGO_REDIS_LOG_IGNORED_EXCEPTIONS = True\nSESSION_ENGINE = \"django.contrib.sessions.backends.cache\"\nSESSION_SERIALIZER = \"django.contrib.sessions.serializers.PickleSerializer\"\nSESSION_CACHE_ALIAS = \"default\"\n# Configured via custom SessionMiddleware\n# SESSION_COOKIE_SAMESITE = \"None\"\n# SESSION_COOKIE_SECURE = True\nSESSION_EXPIRE_AT_BROWSER_CLOSE = True\n\nMESSAGE_STORAGE = \"authentik.root.messages.storage.ChannelsStorage\"\n\nMIDDLEWARE = [\n 
\"authentik.root.middleware.LoggingMiddleware\",\n \"django_prometheus.middleware.PrometheusBeforeMiddleware\",\n \"authentik.root.middleware.SessionMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n \"authentik.core.middleware.RequestIDMiddleware\",\n \"authentik.tenants.middleware.TenantMiddleware\",\n \"authentik.events.middleware.AuditMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n \"authentik.core.middleware.ImpersonateMiddleware\",\n \"django_prometheus.middleware.PrometheusAfterMiddleware\",\n]\n\nROOT_URLCONF = \"authentik.root.urls\"\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [CONFIG.y(\"email.template_dir\")],\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n \"authentik.tenants.utils.context_processor\",\n ],\n },\n },\n]\n\nASGI_APPLICATION = \"authentik.root.asgi.application\"\n\nCHANNEL_LAYERS = {\n \"default\": {\n \"BACKEND\": \"channels_redis.core.RedisChannelLayer\",\n \"CONFIG\": {\n \"hosts\": [f\"{_redis_url}/{CONFIG.y('redis.ws_db')}\"],\n },\n },\n}\n\n\n# Database\n# https://docs.djangoproject.com/en/2.1/ref/settings/#databases\n\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django_prometheus.db.backends.postgresql\",\n \"HOST\": CONFIG.y(\"postgresql.host\"),\n \"NAME\": CONFIG.y(\"postgresql.name\"),\n \"USER\": CONFIG.y(\"postgresql.user\"),\n \"PASSWORD\": CONFIG.y(\"postgresql.password\"),\n \"PORT\": int(CONFIG.y(\"postgresql.port\")),\n }\n}\n\nif CONFIG.y_bool(\"postgresql.use_pgbouncer\", False):\n # https://docs.djangoproject.com/en/4.0/ref/databases/#transaction-pooling-server-side-cursors\n DATABASES[\"default\"][\"DISABLE_SERVER_SIDE_CURSORS\"] = True\n # https://docs.djangoproject.com/en/4.0/ref/databases/#persistent-connections\n DATABASES[\"default\"][\"CONN_MAX_AGE\"] = None # persistent\n\n# Email\nEMAIL_HOST = CONFIG.y(\"email.host\")\nEMAIL_PORT = int(CONFIG.y(\"email.port\"))\nEMAIL_HOST_USER = CONFIG.y(\"email.username\")\nEMAIL_HOST_PASSWORD = CONFIG.y(\"email.password\")\nEMAIL_USE_TLS = CONFIG.y_bool(\"email.use_tls\", False)\nEMAIL_USE_SSL = CONFIG.y_bool(\"email.use_ssl\", False)\nEMAIL_TIMEOUT = int(CONFIG.y(\"email.timeout\"))\nDEFAULT_FROM_EMAIL = CONFIG.y(\"email.from\")\nSERVER_EMAIL = DEFAULT_FROM_EMAIL\nEMAIL_SUBJECT_PREFIX = \"[authentik] \"\n\n# Password validation\n# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\",\n },\n {\"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\"},\n {\"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\"},\n {\"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\"},\n]\n\n\n# Internationalization\n# https://docs.djangoproject.com/en/2.1/topics/i18n/\n\nLANGUAGE_CODE = \"en-us\"\n\nTIME_ZONE = \"UTC\"\n\nUSE_I18N = True\n\nUSE_TZ = True\n\nLOCALE_PATHS = [\"./locale\"]\n\n# Celery settings\n# Add a 10 minute timeout to all Celery 
tasks.\nCELERY_TASK_SOFT_TIME_LIMIT = 600\nCELERY_WORKER_MAX_TASKS_PER_CHILD = 50\nCELERY_WORKER_CONCURRENCY = 2\nCELERY_BEAT_SCHEDULE = {\n \"clean_expired_models\": {\n \"task\": \"authentik.core.tasks.clean_expired_models\",\n \"schedule\": crontab(minute=\"2-59/5\"),\n \"options\": {\"queue\": \"authentik_scheduled\"},\n },\n \"user_cleanup\": {\n \"task\": \"authentik.core.tasks.clean_temporary_users\",\n \"schedule\": crontab(minute=\"9-59/5\"),\n \"options\": {\"queue\": \"authentik_scheduled\"},\n },\n}\nCELERY_TASK_CREATE_MISSING_QUEUES = True\nCELERY_TASK_DEFAULT_QUEUE = \"authentik\"\nCELERY_BROKER_URL = (\n f\"{_redis_url}/{CONFIG.y('redis.message_queue_db')}{REDIS_CELERY_TLS_REQUIREMENTS}\"\n)\nCELERY_RESULT_BACKEND = (\n f\"{_redis_url}/{CONFIG.y('redis.message_queue_db')}{REDIS_CELERY_TLS_REQUIREMENTS}\"\n)\n\n# Sentry integration\nenv = get_env()\n_ERROR_REPORTING = CONFIG.y_bool(\"error_reporting.enabled\", False)\nif _ERROR_REPORTING:\n sentry_env = CONFIG.y(\"error_reporting.environment\", \"customer\")\n sentry_init()\n set_tag(\"authentik.uuid\", sha512(str(SECRET_KEY).encode(\"ascii\")).hexdigest()[:16])\n\n\n# Static files (CSS, JavaScript, Images)\n# https://docs.djangoproject.com/en/2.1/howto/static-files/\n\nSTATIC_URL = \"/static/\"\nMEDIA_URL = \"/media/\"\n\nTEST = False\nTEST_RUNNER = \"authentik.root.test_runner.PytestTestRunner\"\n# We can't check TEST here as its set later by the test runner\nLOG_LEVEL = CONFIG.y(\"log_level\").upper() if \"TF_BUILD\" not in os.environ else \"DEBUG\"\n# We could add a custom level to stdlib logging and structlog, but it's not easy or clean\n# https://stackoverflow.com/questions/54505487/custom-log-level-not-working-with-structlog\n# Additionally, the entire code uses debug as highest level so that would have to be re-written too\nif LOG_LEVEL == \"TRACE\":\n LOG_LEVEL = \"DEBUG\"\n\nstructlog.configure_once(\n processors=[\n structlog.stdlib.add_log_level,\n structlog.stdlib.add_logger_name,\n structlog.contextvars.merge_contextvars,\n add_process_id,\n structlog.stdlib.PositionalArgumentsFormatter(),\n structlog.processors.TimeStamper(fmt=\"iso\", utc=False),\n structlog.processors.StackInfoRenderer(),\n structlog.processors.dict_tracebacks,\n structlog.stdlib.ProcessorFormatter.wrap_for_formatter,\n ],\n logger_factory=structlog.stdlib.LoggerFactory(),\n wrapper_class=structlog.make_filtering_bound_logger(\n getattr(logging, LOG_LEVEL, logging.WARNING)\n ),\n cache_logger_on_first_use=True,\n)\n\nLOG_PRE_CHAIN = [\n # Add the log level and a timestamp to the event_dict if the log entry\n # is not from structlog.\n structlog.stdlib.add_log_level,\n structlog.stdlib.add_logger_name,\n structlog.processors.TimeStamper(),\n structlog.processors.StackInfoRenderer(),\n]\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\n \"json\": {\n \"()\": structlog.stdlib.ProcessorFormatter,\n \"processor\": structlog.processors.JSONRenderer(sort_keys=True),\n \"foreign_pre_chain\": LOG_PRE_CHAIN,\n },\n \"console\": {\n \"()\": structlog.stdlib.ProcessorFormatter,\n \"processor\": structlog.dev.ConsoleRenderer(colors=DEBUG),\n \"foreign_pre_chain\": LOG_PRE_CHAIN,\n },\n },\n \"handlers\": {\n \"console\": {\n \"level\": \"DEBUG\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"console\" if DEBUG else \"json\",\n },\n },\n \"loggers\": {},\n}\n\n_LOGGING_HANDLER_MAP = {\n \"\": LOG_LEVEL,\n \"authentik\": LOG_LEVEL,\n \"django\": \"WARNING\",\n \"celery\": \"WARNING\",\n \"selenium\": 
\"WARNING\",\n \"docker\": \"WARNING\",\n \"urllib3\": \"WARNING\",\n \"websockets\": \"WARNING\",\n \"daphne\": \"WARNING\",\n \"kubernetes\": \"INFO\",\n \"asyncio\": \"WARNING\",\n \"redis\": \"WARNING\",\n \"silk\": \"INFO\",\n}\nfor handler_name, level in _LOGGING_HANDLER_MAP.items():\n # pyright: reportGeneralTypeIssues=false\n LOGGING[\"loggers\"][handler_name] = {\n \"handlers\": [\"console\"],\n \"level\": level,\n \"propagate\": False,\n }\n\n\n_DISALLOWED_ITEMS = [\n \"INSTALLED_APPS\",\n \"MIDDLEWARE\",\n \"AUTHENTICATION_BACKENDS\",\n \"CELERY_BEAT_SCHEDULE\",\n]\n# Load subapps's INSTALLED_APPS\nfor _app in INSTALLED_APPS:\n if _app.startswith(\"authentik\"):\n if \"apps\" in _app:\n _app = \".\".join(_app.split(\".\")[:-2])\n try:\n app_settings = importlib.import_module(f\"{_app}.settings\")\n INSTALLED_APPS.extend(getattr(app_settings, \"INSTALLED_APPS\", []))\n MIDDLEWARE.extend(getattr(app_settings, \"MIDDLEWARE\", []))\n AUTHENTICATION_BACKENDS.extend(getattr(app_settings, \"AUTHENTICATION_BACKENDS\", []))\n CELERY_BEAT_SCHEDULE.update(getattr(app_settings, \"CELERY_BEAT_SCHEDULE\", {}))\n for _attr in dir(app_settings):\n if not _attr.startswith(\"__\") and _attr not in _DISALLOWED_ITEMS:\n globals()[_attr] = getattr(app_settings, _attr)\n except ImportError:\n pass\n\nif DEBUG:\n CELERY_TASK_ALWAYS_EAGER = True\n os.environ[ENV_GIT_HASH_KEY] = \"dev\"\n INSTALLED_APPS.append(\"silk\")\n SILKY_PYTHON_PROFILER = True\n MIDDLEWARE = [\"silk.middleware.SilkyMiddleware\"] + MIDDLEWARE\n\nINSTALLED_APPS.append(\"authentik.core\")\n\nCONFIG.log(\"info\", \"Booting authentik\", version=__version__)\n",
"path": "authentik/root/settings.py"
}
] | diff --git a/authentik/lib/default.yml b/authentik/lib/default.yml
index 5a2428cd6db3..44925bf5acb1 100644
--- a/authentik/lib/default.yml
+++ b/authentik/lib/default.yml
@@ -6,6 +6,7 @@ postgresql:
user: authentik
port: 5432
password: 'env://POSTGRES_PASSWORD'
+ use_pgbouncer: false
listen:
listen_http: 0.0.0.0:9000
diff --git a/authentik/root/settings.py b/authentik/root/settings.py
index a91babefa484..ee2a52c8411a 100644
--- a/authentik/root/settings.py
+++ b/authentik/root/settings.py
@@ -270,6 +270,12 @@
}
}
+if CONFIG.y_bool("postgresql.use_pgbouncer", False):
+ # https://docs.djangoproject.com/en/4.0/ref/databases/#transaction-pooling-server-side-cursors
+ DATABASES["default"]["DISABLE_SERVER_SIDE_CURSORS"] = True
+ # https://docs.djangoproject.com/en/4.0/ref/databases/#persistent-connections
+ DATABASES["default"]["CONN_MAX_AGE"] = None # persistent
+
# Email
EMAIL_HOST = CONFIG.y("email.host")
EMAIL_PORT = int(CONFIG.y("email.port"))
diff --git a/website/docs/installation/configuration.md b/website/docs/installation/configuration.md
index ee3283574385..b6876f9ff9f1 100644
--- a/website/docs/installation/configuration.md
+++ b/website/docs/installation/configuration.md
@@ -32,6 +32,7 @@ kubectl exec -it deployment/authentik-worker -c authentik -- ak dump_config
- `AUTHENTIK_POSTGRESQL__USER`: Database user
- `AUTHENTIK_POSTGRESQL__PORT`: Database port, defaults to 5432
- `AUTHENTIK_POSTGRESQL__PASSWORD`: Database password, defaults to the environment variable `POSTGRES_PASSWORD`
+- `AUTHENTIK_POSTGRESQL__USE_PGBOUNCER`: Adjust configuration to support connection to PgBouncer
## Redis Settings
 | Support HA PostgreSQL
When using an HA installation of PostgreSQL, connection problems with the API server arise during the initial load of the user dashboard.
PgBouncer in transaction pooling mode requires custom settings in order to function correctly.
It would be nice if the user could specify that an HA installation is used, so that the settings are adjusted automatically (a short sketch of why these options matter follows below).
```
# https://docs.djangoproject.com/en/4.0/ref/databases/#transaction-pooling-server-side-cursors
DATABASES['default']['DISABLE_SERVER_SIDE_CURSORS'] = True
# https://docs.djangoproject.com/en/4.0/ref/databases/#persistent-connections
DATABASES['default']['CONN_MAX_AGE'] = None # persistent
```
Currently the settings do not include these options:
https://github.com/goauthentik/authentik/blob/89c84f10d0e6e70e51444a9fbf18980ba25008de/authentik/root/settings.py#L279-L288
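A minimal sketch of why these two options matter behind a transaction-pooling PgBouncer (the model access via `get_user_model()` is illustrative only and not part of the issue; it resolves to `authentik_core.User` given the settings above):
```
# Sketch, assuming the two settings quoted above are active. QuerySet.iterator()
# normally opens a named (server-side) cursor; under PgBouncer transaction pooling
# the next fetch can be routed to a different PostgreSQL backend where that cursor
# no longer exists, so the query fails mid-iteration. DISABLE_SERVER_SIDE_CURSORS
# makes Django fetch results client-side in chunks instead, which is safe behind
# the pooler, while CONN_MAX_AGE = None just keeps Django's client connections open
# since PgBouncer already does the pooling.
from django.conf import settings
from django.contrib.auth import get_user_model  # authentik_core.User in this project


def iterate_usernames(chunk_size=500):
    assert settings.DATABASES["default"].get("DISABLE_SERVER_SIDE_CURSORS", False)
    for user in get_user_model().objects.iterator(chunk_size=chunk_size):
        yield user.username
```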
|
django-extensions__django-extensions-1150 | [
{
"content": "# -*- coding: utf-8 -*-\nimport os\nimport fnmatch\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.core.management.color import color_style\nfrom django.template.loader import get_template\n\nfrom django_extensions.compat import get_template_setting\nfrom django_extensions.management.utils import signalcommand\n\n\n#\n# TODO: Render the template with fake request object ?\n#\n\n\nclass Command(BaseCommand):\n args = ''\n help = \"Validate templates on syntax and compile errors\"\n ignores = set([\n \".DS_Store\",\n \"*.swp\",\n \"*~\",\n ])\n\n def add_arguments(self, parser):\n super(Command, self).add_arguments(parser)\n parser.add_argument(\n '--no-apps', action='store_true', dest='no_apps',\n default=False, help=\"Do not automatically include apps.\")\n parser.add_argument(\n '--break', '-b', action='store_true', dest='break',\n default=False, help=\"Break on first error.\")\n parser.add_argument(\n '--include', '-i', action='append', dest='includes',\n default=[], help=\"Append these paths to TEMPLATE DIRS\")\n parser.add_argument(\n '--ignore-app', action='append', dest='ignore_apps',\n default=[], help=\"Ignore these apps\")\n\n def ignore_filename(self, filename):\n filename = os.path.basename(filename)\n for ignore_pattern in self.ignores:\n if fnmatch.fnmatch(filename, ignore_pattern):\n return True\n return False\n\n @signalcommand\n def handle(self, *args, **options):\n if hasattr(settings, 'VALIDATE_TEMPLATES_IGNORES'):\n self.ignores = getattr(settings, 'VALIDATE_TEMPLATES_IGNORES')\n\n style = color_style()\n template_dirs = set(get_template_setting('DIRS'))\n template_dirs |= set(options.get('includes', []))\n template_dirs |= set(getattr(settings, 'VALIDATE_TEMPLATES_EXTRA_TEMPLATE_DIRS', []))\n\n if not options['no_apps']:\n ignore_apps = options['ignore_apps']\n if not ignore_apps and hasattr(settings, 'VALIDATE_TEMPLATES_IGNORE_APPS'):\n ignore_apps = getattr(settings, 'VALIDATE_TEMPLATES_IGNORE_APPS')\n for app in apps.get_app_configs():\n if app.name in ignore_apps:\n continue\n app_template_dir = os.path.join(app.path, 'templates')\n if os.path.isdir(app_template_dir):\n template_dirs.add(app_template_dir)\n\n # This is unsafe:\n # https://docs.djangoproject.com/en/1.10/topics/settings/#altering-settings-at-runtime\n if hasattr(settings, 'TEMPLATES'):\n settings.TEMPLATES[0]['DIRS'] = list(template_dirs)\n else:\n settings.TEMPLATE_DIRS = list(template_dirs)\n settings.TEMPLATE_DEBUG = True\n verbosity = int(options.get('verbosity', 1))\n errors = 0\n\n for template_dir in template_dirs:\n for root, dirs, filenames in os.walk(template_dir):\n for filename in filenames:\n if self.ignore_filename(filename):\n continue\n\n filepath = os.path.realpath(os.path.join(root, filename))\n if verbosity > 1:\n print(filepath)\n try:\n get_template(filepath)\n except Exception as e:\n errors += 1\n print(\"%s: %s\" % (filepath, style.ERROR(\"%s %s\" % (e.__class__.__name__, str(e)))))\n if errors and options.get('break', False):\n raise CommandError(\"Errors found\")\n\n if errors:\n raise CommandError(\"%s errors found\" % errors)\n print(\"%s errors found\" % errors)\n",
"path": "django_extensions/management/commands/validate_templates.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\nimport os\nimport fnmatch\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand, CommandError\nfrom django.core.management.color import color_style\nfrom django.template.loader import get_template\n\nfrom django_extensions.compat import get_template_setting\nfrom django_extensions.management.utils import signalcommand\n\n\n#\n# TODO: Render the template with fake request object ?\n#\n\n\nclass Command(BaseCommand):\n args = ''\n help = \"Validate templates on syntax and compile errors\"\n ignores = set([\n \".DS_Store\",\n \"*.swp\",\n \"*~\",\n ])\n\n def add_arguments(self, parser):\n super(Command, self).add_arguments(parser)\n parser.add_argument(\n '--no-apps', action='store_true', dest='no_apps',\n default=False, help=\"Do not automatically include apps.\")\n parser.add_argument(\n '--break', '-b', action='store_true', dest='break',\n default=False, help=\"Break on first error.\")\n parser.add_argument(\n '--include', '-i', action='append', dest='includes',\n default=[], help=\"Append these paths to TEMPLATE DIRS\")\n parser.add_argument(\n '--ignore-app', action='append', dest='ignore_apps',\n default=[], help=\"Ignore these apps\")\n\n def ignore_filename(self, filename):\n filename = os.path.basename(filename)\n for ignore_pattern in self.ignores:\n if fnmatch.fnmatch(filename, ignore_pattern):\n return True\n return False\n\n @signalcommand\n def handle(self, *args, **options):\n if hasattr(settings, 'VALIDATE_TEMPLATES_IGNORES'):\n self.ignores = getattr(settings, 'VALIDATE_TEMPLATES_IGNORES')\n\n style = color_style()\n template_dirs = set(get_template_setting('DIRS'))\n template_dirs |= set(options.get('includes', []))\n template_dirs |= set(getattr(settings, 'VALIDATE_TEMPLATES_EXTRA_TEMPLATE_DIRS', []))\n\n if not options['no_apps']:\n ignore_apps = options['ignore_apps']\n if not ignore_apps and hasattr(settings, 'VALIDATE_TEMPLATES_IGNORE_APPS'):\n ignore_apps = getattr(settings, 'VALIDATE_TEMPLATES_IGNORE_APPS')\n for app in apps.get_app_configs():\n if app.name in ignore_apps:\n continue\n app_template_dir = os.path.join(app.path, 'templates')\n if os.path.isdir(app_template_dir):\n template_dirs.add(app_template_dir)\n\n # This is unsafe:\n # https://docs.djangoproject.com/en/1.10/topics/settings/#altering-settings-at-runtime\n if hasattr(settings, 'TEMPLATES'):\n settings.TEMPLATES[0]['DIRS'] = list(template_dirs)\n else:\n settings.TEMPLATE_DIRS = list(template_dirs)\n settings.TEMPLATE_DEBUG = True\n verbosity = int(options.get('verbosity', 1))\n errors = 0\n\n for template_dir in template_dirs:\n for root, dirs, filenames in os.walk(template_dir):\n for filename in filenames:\n if self.ignore_filename(filename):\n continue\n\n filepath = os.path.join(root, filename)\n if verbosity > 1:\n print(filepath)\n try:\n get_template(filepath)\n except Exception as e:\n errors += 1\n print(\"%s: %s\" % (filepath, style.ERROR(\"%s %s\" % (e.__class__.__name__, str(e)))))\n if errors and options.get('break', False):\n raise CommandError(\"Errors found\")\n\n if errors:\n raise CommandError(\"%s errors found\" % errors)\n print(\"%s errors found\" % errors)\n",
"path": "django_extensions/management/commands/validate_templates.py"
}
] | diff --git a/django_extensions/management/commands/validate_templates.py b/django_extensions/management/commands/validate_templates.py
index 0e648be15..dffba7cd1 100644
--- a/django_extensions/management/commands/validate_templates.py
+++ b/django_extensions/management/commands/validate_templates.py
@@ -85,7 +85,7 @@ def handle(self, *args, **options):
if self.ignore_filename(filename):
continue
- filepath = os.path.realpath(os.path.join(root, filename))
+ filepath = os.path.join(root, filename)
if verbosity > 1:
print(filepath)
try:
 | validate_templates raises false positives
Since 1.9.8 I have been getting this problem on our internal CI system, even though the files exist on the server at the reported paths.
Locally, no error is raised (a sketch of the likely cause follows the log below).
```
/home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django/contrib/auth/templates/registration/password_reset_subject.txt: TemplateDoesNotExist /home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django/contrib/auth/templates/registration/password_reset_subject.txt
/home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django_extensions/templates/django_extensions/widgets/foreignkey_searchinput.html: TemplateDoesNotExist /home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django_extensions/templates/django_extensions/widgets/foreignkey_searchinput.html
/home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django_extensions/templates/django_extensions/graph_models/label.dot: TemplateDoesNotExist /home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django_extensions/templates/django_extensions/graph_models/label.dot
/home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django_extensions/templates/django_extensions/graph_models/digraph.dot: TemplateDoesNotExist /home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django_extensions/templates/django_extensions/graph_models/digraph.dot
/home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django_extensions/templates/django_extensions/graph_models/relation.dot: TemplateDoesNotExist /home/django/.virtualenvs/abb38d6f6e8bbb5c224330bba0513c93/lib/python2.7/site-packages/django_extensions/templates/django_extensions/graph_models/relation.dot
CommandError: 5 errors found
```
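A minimal sketch of the likely failure mode, with hypothetical paths: Django's filesystem loader only returns a template whose absolute path lies under one of the configured directories, and that containment check does not resolve symlinks. When the CI virtualenv sits behind a symlink, `os.path.realpath()` rewrites the prefix so the check fails and `get_template()` raises `TemplateDoesNotExist` even though the file exists; plain `os.path.join()` (the fix in the diff above) keeps the original prefix and passes.
```
import os

# Hypothetical CI layout: /home/django/.virtualenvs/app is a symlink to
# /opt/venvs/app-2017, so realpath() rewrites the prefix while the template dir
# collected from the app config keeps the symlinked one.
template_dir = "/home/django/.virtualenvs/app/lib/python2.7/site-packages/django_extensions/templates"
joined = os.path.join(template_dir, "django_extensions/widgets/foreignkey_searchinput.html")
resolved = joined.replace("/home/django/.virtualenvs/app", "/opt/venvs/app-2017")  # what realpath() would return here


def served_by_loader(path, allowed_dir):
    # Rough stand-in for the loader's containment check; it compares absolute
    # paths without resolving symlinks on the configured directory.
    return os.path.abspath(path).startswith(os.path.abspath(allowed_dir) + os.sep)


print(served_by_loader(joined, template_dir))    # True  -> template is found
print(served_by_loader(resolved, template_dir))  # False -> TemplateDoesNotExist
```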
|
xonsh__xonsh-4879 | [
{
"content": "\"\"\"Aliases for the xonsh shell.\"\"\"\nimport argparse\nimport collections.abc as cabc\nimport functools\nimport inspect\nimport os\nimport re\nimport sys\nimport types\nimport typing as tp\n\nimport xonsh.completers._aliases as xca\nimport xonsh.history.main as xhm\nimport xonsh.xoreutils.which as xxw\nfrom xonsh.ast import isexpression\nfrom xonsh.built_ins import XSH\nfrom xonsh.cli_utils import Annotated, Arg, ArgParserAlias\nfrom xonsh.dirstack import _get_cwd, cd, dirs, popd, pushd\nfrom xonsh.environ import locate_binary, make_args_env\nfrom xonsh.foreign_shells import foreign_shell_data\nfrom xonsh.jobs import bg, clean_jobs, disown, fg, jobs\nfrom xonsh.lazyasd import lazyobject\nfrom xonsh.platform import (\n IN_APPIMAGE,\n ON_ANACONDA,\n ON_DARWIN,\n ON_DRAGONFLY,\n ON_FREEBSD,\n ON_NETBSD,\n ON_OPENBSD,\n ON_WINDOWS,\n)\nfrom xonsh.timings import timeit_alias\nfrom xonsh.tools import (\n ALIAS_KWARG_NAMES,\n XonshError,\n adjust_shlvl,\n argvquote,\n escape_windows_cmd_string,\n print_color,\n strip_simple_quotes,\n swap_values,\n to_repr_pretty_,\n to_shlvl,\n unthreadable,\n)\nfrom xonsh.xontribs import xontribs_main\n\n\n@lazyobject\ndef EXEC_ALIAS_RE():\n return re.compile(r\"@\\(|\\$\\(|!\\(|\\$\\[|!\\[|\\&\\&|\\|\\||\\s+and\\s+|\\s+or\\s+|[>|<]\")\n\n\nclass Aliases(cabc.MutableMapping):\n \"\"\"Represents a location to hold and look up aliases.\"\"\"\n\n def __init__(self, *args, **kwargs):\n self._raw = {}\n self.update(*args, **kwargs)\n\n @staticmethod\n def _get_func_name(func):\n name = func.__name__\n\n # Strip leading underscore\n if name.startswith(\"_\"):\n name = name[1:]\n return name\n\n def _register(self, func, name=\"\", dash_case=True):\n name = name or self._get_func_name(func)\n\n if dash_case:\n name = name.replace(\"_\", \"-\")\n\n self[name] = func\n return func\n\n @tp.overload\n def register(self, func: types.FunctionType) -> types.FunctionType:\n \"\"\"simple usage\"\"\"\n\n @tp.overload\n def register(\n self, name: str, *, dash_case: bool = True\n ) -> tp.Callable[[types.FunctionType], types.FunctionType]:\n ...\n\n def register(self, func_or_name, name=None, dash_case=True):\n \"\"\"Decorator to register the given function by name.\"\"\"\n\n if isinstance(func_or_name, types.FunctionType):\n return self._register(func_or_name, name, dash_case)\n\n def wrapper(func):\n return self._register(func, func_or_name, dash_case)\n\n return wrapper\n\n def get(self, key, default=None):\n \"\"\"Returns the (possibly modified) value. If the key is not present,\n then `default` is returned.\n If the value is callable, it is returned without modification. If it\n is an iterable of strings it will be evaluated recursively to expand\n other aliases, resulting in a new list or a \"partially applied\"\n callable.\n \"\"\"\n val = self._raw.get(key)\n if val is None:\n return default\n elif isinstance(val, cabc.Iterable) or callable(val):\n return self.eval_alias(val, seen_tokens={key})\n else:\n msg = \"alias of {!r} has an inappropriate type: {!r}\"\n raise TypeError(msg.format(key, val))\n\n def eval_alias(self, value, seen_tokens=frozenset(), acc_args=()):\n \"\"\"\n \"Evaluates\" the alias ``value``, by recursively looking up the leftmost\n token and \"expanding\" if it's also an alias.\n\n A value like ``[\"cmd\", \"arg\"]`` might transform like this:\n ``> [\"cmd\", \"arg\"] -> [\"ls\", \"-al\", \"arg\"] -> callable()``\n where ``cmd=ls -al`` and ``ls`` is an alias with its value being a\n callable. 
The resulting callable will be \"partially applied\" with\n ``[\"-al\", \"arg\"]``.\n \"\"\"\n # Beware of mutability: default values for keyword args are evaluated\n # only once.\n if callable(value):\n return partial_eval_alias(value, acc_args=acc_args)\n else:\n expand_path = XSH.expand_path\n token, *rest = map(expand_path, value)\n if token in seen_tokens or token not in self._raw:\n # ^ Making sure things like `egrep=egrep --color=auto` works,\n # and that `l` evals to `ls --color=auto -CF` if `l=ls -CF`\n # and `ls=ls --color=auto`\n rtn = [token]\n rtn.extend(rest)\n rtn.extend(acc_args)\n return rtn\n else:\n seen_tokens = seen_tokens | {token}\n acc_args = rest + list(acc_args)\n return self.eval_alias(self._raw[token], seen_tokens, acc_args)\n\n def expand_alias(self, line: str, cursor_index: int) -> str:\n \"\"\"Expands any aliases present in line if alias does not point to a\n builtin function and if alias is only a single command.\n The command won't be expanded if the cursor's inside/behind it.\n \"\"\"\n word = (line.split(maxsplit=1) or [\"\"])[0]\n if word in XSH.aliases and isinstance(self.get(word), cabc.Sequence): # type: ignore\n word_idx = line.find(word)\n word_edge = word_idx + len(word)\n if cursor_index > word_edge:\n # the cursor isn't inside/behind the word\n expansion = \" \".join(self.get(word))\n line = line[:word_idx] + expansion + line[word_edge:]\n return line\n\n #\n # Mutable mapping interface\n #\n\n def __getitem__(self, key):\n return self._raw[key]\n\n def __setitem__(self, key, val):\n if isinstance(val, str):\n f = \"<exec-alias:\" + key + \">\"\n if EXEC_ALIAS_RE.search(val) is not None:\n # We have a sub-command (e.g. $(cmd)) or IO redirect (e.g. >>)\n self._raw[key] = ExecAlias(val, filename=f)\n elif isexpression(val):\n # expansion substitution\n lexer = XSH.execer.parser.lexer\n self._raw[key] = list(map(strip_simple_quotes, lexer.split(val)))\n else:\n # need to exec alias\n self._raw[key] = ExecAlias(val, filename=f)\n else:\n self._raw[key] = val\n\n def _common_or(self, other):\n new_dict = self._raw.copy()\n for key in dict(other):\n new_dict[key] = other[key]\n return Aliases(new_dict)\n\n def __or__(self, other):\n return self._common_or(other)\n\n def __ror__(self, other):\n return self._common_or(other)\n\n def __ior__(self, other):\n for key in dict(other):\n self[key] = other[key]\n return self\n\n def __delitem__(self, key):\n del self._raw[key]\n\n def update(self, *args, **kwargs):\n for key, val in dict(*args, **kwargs).items():\n self[key] = val\n\n def __iter__(self):\n yield from self._raw\n\n def __len__(self):\n return len(self._raw)\n\n def __str__(self):\n return str(self._raw)\n\n def __repr__(self):\n return \"{}.{}({})\".format(\n self.__class__.__module__, self.__class__.__name__, self._raw\n )\n\n _repr_pretty_ = to_repr_pretty_\n\n\nclass ExecAlias:\n \"\"\"Provides a callable alias for xonsh source code.\"\"\"\n\n def __init__(self, src, filename=\"<exec-alias>\"):\n \"\"\"\n Parameters\n ----------\n src : str\n Source code that will be\n \"\"\"\n self.src = src\n self.filename = filename\n\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n execer = XSH.execer\n frame = stack[0][0] # execute as though we are at the call site\n\n alias_args = {\"args\": args}\n for i, a in enumerate(args):\n alias_args[f\"arg{i}\"] = a\n\n with XSH.env.swap(alias_args):\n execer.exec(\n self.src,\n glbs=frame.f_globals,\n locs=frame.f_locals,\n filename=self.filename,\n )\n if 
XSH.history is not None:\n return XSH.history.last_cmd_rtn\n\n def __repr__(self):\n return f\"ExecAlias({self.src!r}, filename={self.filename!r})\"\n\n\nclass PartialEvalAliasBase:\n \"\"\"Partially evaluated alias.\"\"\"\n\n def __init__(self, f, acc_args=()):\n \"\"\"\n Parameters\n ----------\n f : callable\n A function to dispatch to.\n acc_args : sequence of strings, optional\n Additional arguments to prepent to the argument list passed in\n when the alias is called.\n \"\"\"\n self.f = f\n self.acc_args = acc_args\n self.__name__ = getattr(f, \"__name__\", self.__class__.__name__)\n\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n return self.f(args, stdin, stdout, stderr, spec, stack)\n\n def __repr__(self):\n return \"{name}({f!r}, acc_args={acc_args!r})\".format(\n name=self.__class__.__name__, f=self.f, acc_args=self.acc_args\n )\n\n\nclass PartialEvalAlias0(PartialEvalAliasBase):\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n if args:\n msg = \"callable alias {f!r} takes no arguments, but {args!f} provided. \"\n msg += \"Of these {acc_args!r} were partially applied.\"\n raise XonshError(msg.format(f=self.f, args=args, acc_args=self.acc_args))\n return self.f()\n\n\nclass PartialEvalAlias1(PartialEvalAliasBase):\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n return self.f(args)\n\n\nclass PartialEvalAlias2(PartialEvalAliasBase):\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n return self.f(args, stdin)\n\n\nclass PartialEvalAlias3(PartialEvalAliasBase):\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n return self.f(args, stdin, stdout)\n\n\nclass PartialEvalAlias4(PartialEvalAliasBase):\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n return self.f(args, stdin, stdout, stderr)\n\n\nclass PartialEvalAlias5(PartialEvalAliasBase):\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n return self.f(args, stdin, stdout, stderr, spec)\n\n\nclass PartialEvalAlias6(PartialEvalAliasBase):\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n return self.f(args, stdin, stdout, stderr, spec, stack)\n\n\nPARTIAL_EVAL_ALIASES = (\n PartialEvalAlias0,\n PartialEvalAlias1,\n PartialEvalAlias2,\n PartialEvalAlias3,\n PartialEvalAlias4,\n PartialEvalAlias5,\n PartialEvalAlias6,\n)\n\n\ndef partial_eval_alias(f, acc_args=()):\n \"\"\"Dispatches the appropriate eval alias based on the number of args to the original callable alias\n and how many arguments to apply.\n \"\"\"\n # no partial needed if no extra args\n if not acc_args:\n return f\n # need to dispatch\n numargs = 0\n for name, param in inspect.signature(f).parameters.items():\n if (\n param.kind == param.POSITIONAL_ONLY\n or param.kind == param.POSITIONAL_OR_KEYWORD\n ):\n numargs += 1\n elif name in ALIAS_KWARG_NAMES and param.kind == param.KEYWORD_ONLY:\n numargs += 1\n if numargs < 7:\n return PARTIAL_EVAL_ALIASES[numargs](f, acc_args=acc_args)\n else:\n e = \"Expected proxy with 
6 or fewer arguments for {}, not {}\"\n raise XonshError(e.format(\", \".join(ALIAS_KWARG_NAMES), numargs))\n\n\n#\n# Actual aliases below\n#\n\n\ndef xonsh_exit(args, stdin=None):\n \"\"\"Sends signal to exit shell.\"\"\"\n if not clean_jobs():\n # Do not exit if jobs not cleaned up\n return None, None\n XSH.exit = True\n print() # gimme a newline\n return None, None\n\n\ndef xonsh_reset(args, stdin=None):\n \"\"\"Clears __xonsh__.ctx\"\"\"\n XSH.ctx.clear()\n\n\ndef source_foreign_fn(\n shell: str,\n files_or_code: Annotated[tp.List[str], Arg(nargs=\"+\")],\n interactive=True,\n login=False,\n envcmd=None,\n aliascmd=None,\n extra_args=\"\",\n safe=True,\n prevcmd=\"\",\n postcmd=\"\",\n funcscmd=\"\",\n sourcer=None,\n use_tmpfile=False,\n seterrprevcmd=None,\n seterrpostcmd=None,\n overwrite_aliases=False,\n suppress_skip_message=False,\n show=False,\n dryrun=False,\n _stderr=None,\n):\n \"\"\"Sources a file written in a foreign shell language.\n\n Parameters\n ----------\n shell\n Name or path to the foreign shell\n files_or_code\n file paths to source or code in the target language.\n interactive : -n, --non-interactive\n whether the sourced shell should be interactive\n login : -l, --login\n whether the sourced shell should be login\n envcmd : --envcmd\n command to print environment\n aliascmd : --aliascmd\n command to print aliases\n extra_args : --extra-args\n extra arguments needed to run the shell\n safe : -u, --unsafe\n whether the source shell should be run safely, and not raise any errors, even if they occur.\n prevcmd : -p, --prevcmd\n command(s) to run before any other commands, replaces traditional source.\n postcmd : --postcmd\n command(s) to run after all other commands\n funcscmd : --funcscmd\n code to find locations of all native functions in the shell language.\n sourcer : --sourcer\n the source command in the target shell language.\n If this is not set, a default value will attempt to be\n looked up based on the shell name.\n use_tmpfile : --use-tmpfile\n whether the commands for source shell should be written to a temporary file.\n seterrprevcmd : --seterrprevcmd\n command(s) to set exit-on-error before any other commands.\n seterrpostcmd : --seterrpostcmd\n command(s) to set exit-on-error after all other commands.\n overwrite_aliases : --overwrite-aliases\n flag for whether or not sourced aliases should replace the current xonsh aliases.\n suppress_skip_message : --suppress-skip-message\n flag for whether or not skip messages should be suppressed.\n show : --show\n show the script output.\n dryrun : -d, --dry-run\n Will not actually source the file.\n \"\"\"\n extra_args = tuple(extra_args.split())\n env = XSH.env\n suppress_skip_message = (\n env.get(\"FOREIGN_ALIASES_SUPPRESS_SKIP_MESSAGE\")\n if not suppress_skip_message\n else suppress_skip_message\n )\n files: tp.Tuple[str, ...] 
= ()\n if prevcmd:\n pass # don't change prevcmd if given explicitly\n elif os.path.isfile(files_or_code[0]):\n if not sourcer:\n return (None, \"xonsh: error: `sourcer` command is not mentioned.\\n\", 1)\n # we have filenames to source\n prevcmd = \"\".join([f\"{sourcer} {f}\\n\" for f in files_or_code])\n files = tuple(files_or_code)\n elif not prevcmd:\n prevcmd = \" \".join(files_or_code) # code to run, no files\n foreign_shell_data.cache_clear() # make sure that we don't get prev src\n fsenv, fsaliases = foreign_shell_data(\n shell=shell,\n login=login,\n interactive=interactive,\n envcmd=envcmd,\n aliascmd=aliascmd,\n extra_args=extra_args,\n safe=safe,\n prevcmd=prevcmd,\n postcmd=postcmd,\n funcscmd=funcscmd or None, # the default is None in the called function\n sourcer=sourcer,\n use_tmpfile=use_tmpfile,\n seterrprevcmd=seterrprevcmd,\n seterrpostcmd=seterrpostcmd,\n show=show,\n dryrun=dryrun,\n files=files,\n )\n if fsenv is None:\n if dryrun:\n return\n else:\n msg = f\"xonsh: error: Source failed: {prevcmd!r}\\n\"\n msg += \"xonsh: error: Possible reasons: File not found or syntax error\\n\"\n return (None, msg, 1)\n # apply results\n denv = env.detype()\n for k, v in fsenv.items():\n if k == \"SHLVL\": # ignore $SHLVL as sourcing should not change $SHLVL\n continue\n if k in denv and v == denv[k]:\n continue # no change from original\n env[k] = v\n # Remove any env-vars that were unset by the script.\n for k in denv:\n if k not in fsenv:\n env.pop(k, None)\n # Update aliases\n baliases = XSH.aliases\n for k, v in fsaliases.items():\n if k in baliases and v == baliases[k]:\n continue # no change from original\n elif overwrite_aliases or k not in baliases:\n baliases[k] = v\n elif suppress_skip_message:\n pass\n else:\n msg = (\n \"Skipping application of {0!r} alias from {1!r} \"\n \"since it shares a name with an existing xonsh alias. \"\n 'Use \"--overwrite-alias\" option to apply it anyway.'\n 'You may prevent this message with \"--suppress-skip-message\" or '\n '\"$FOREIGN_ALIASES_SUPPRESS_SKIP_MESSAGE = True\".'\n )\n print(msg.format(k, shell), file=_stderr)\n\n\nsource_foreign = ArgParserAlias(\n func=source_foreign_fn, has_args=True, prog=\"source-foreign\"\n)\n\n\n@unthreadable\ndef source_alias(args, stdin=None):\n \"\"\"Executes the contents of the provided files in the current context.\n If sourced file isn't found in cwd, search for file along $PATH to source\n instead.\n \"\"\"\n env = XSH.env\n encoding = env.get(\"XONSH_ENCODING\")\n errors = env.get(\"XONSH_ENCODING_ERRORS\")\n for i, fname in enumerate(args):\n fpath = fname\n if not os.path.isfile(fpath):\n fpath = locate_binary(fname)\n if fpath is None:\n if env.get(\"XONSH_DEBUG\"):\n print(f\"source: {fname}: No such file\", file=sys.stderr)\n if i == 0:\n raise RuntimeError(\n \"must source at least one file, \" + fname + \" does not exist.\"\n )\n break\n _, fext = os.path.splitext(fpath)\n if fext and fext != \".xsh\" and fext != \".py\":\n raise RuntimeError(\n \"attempting to source non-xonsh file! If you are \"\n \"trying to source a file in another language, \"\n \"then please use the appropriate source command. 
\"\n \"For example, source-bash script.sh\"\n )\n with open(fpath, encoding=encoding, errors=errors) as fp:\n src = fp.read()\n if not src.endswith(\"\\n\"):\n src += \"\\n\"\n ctx = XSH.ctx\n updates = {\"__file__\": fpath, \"__name__\": os.path.abspath(fpath)}\n with env.swap(**make_args_env(args[i + 1 :])), swap_values(ctx, updates):\n try:\n XSH.builtins.execx(src, \"exec\", ctx, filename=fpath)\n except Exception:\n print_color(\n \"{RED}You may be attempting to source non-xonsh file! \"\n \"{RESET}If you are trying to source a file in \"\n \"another language, then please use the appropriate \"\n \"source command. For example, {GREEN}source-bash \"\n \"script.sh{RESET}\",\n file=sys.stderr,\n )\n raise\n\n\ndef source_cmd_fn(\n files: Annotated[tp.List[str], Arg(nargs=\"+\")],\n login=False,\n aliascmd=None,\n extra_args=\"\",\n safe=True,\n postcmd=\"\",\n funcscmd=\"\",\n seterrprevcmd=None,\n overwrite_aliases=False,\n suppress_skip_message=False,\n show=False,\n dryrun=False,\n _stderr=None,\n):\n \"\"\"\n Source cmd.exe files\n\n Parameters\n ----------\n files\n paths to source files.\n login : -l, --login\n whether the sourced shell should be login\n envcmd : --envcmd\n command to print environment\n aliascmd : --aliascmd\n command to print aliases\n extra_args : --extra-args\n extra arguments needed to run the shell\n safe : -s, --safe\n whether the source shell should be run safely, and not raise any errors, even if they occur.\n postcmd : --postcmd\n command(s) to run after all other commands\n funcscmd : --funcscmd\n code to find locations of all native functions in the shell language.\n seterrprevcmd : --seterrprevcmd\n command(s) to set exit-on-error before any other commands.\n overwrite_aliases : --overwrite-aliases\n flag for whether or not sourced aliases should replace the current xonsh aliases.\n suppress_skip_message : --suppress-skip-message\n flag for whether or not skip messages should be suppressed.\n show : --show\n show the script output.\n dryrun : -d, --dry-run\n Will not actually source the file.\n \"\"\"\n args = list(files)\n fpath = locate_binary(args[0])\n args[0] = fpath if fpath else args[0]\n if not os.path.isfile(args[0]):\n return (None, f\"xonsh: error: File not found: {args[0]}\\n\", 1)\n prevcmd = \"call \"\n prevcmd += \" \".join([argvquote(arg, force=True) for arg in args])\n prevcmd = escape_windows_cmd_string(prevcmd)\n with XSH.env.swap(PROMPT=\"$P$G\"):\n return source_foreign_fn(\n shell=\"cmd\",\n files_or_code=args,\n interactive=True,\n sourcer=\"call\",\n envcmd=\"set\",\n seterrpostcmd=\"if errorlevel 1 exit 1\",\n use_tmpfile=True,\n prevcmd=prevcmd,\n # from this function\n login=login,\n aliascmd=aliascmd,\n extra_args=extra_args,\n safe=safe,\n postcmd=postcmd,\n funcscmd=funcscmd,\n seterrprevcmd=seterrprevcmd,\n overwrite_aliases=overwrite_aliases,\n suppress_skip_message=suppress_skip_message,\n show=show,\n dryrun=dryrun,\n )\n\n\nsource_cmd = ArgParserAlias(func=source_cmd_fn, has_args=True, prog=\"source-cmd\")\n\n\ndef xexec_fn(\n command: Annotated[tp.List[str], Arg(nargs=argparse.REMAINDER)],\n login=False,\n clean=False,\n name=\"\",\n _stdin=None,\n):\n \"\"\"exec (also aliased as xexec) uses the os.execvpe() function to\n replace the xonsh process with the specified program.\n\n This provides the functionality of the bash 'exec' builtin::\n\n >>> exec bash -l -i\n bash $\n\n Parameters\n ----------\n command\n program to launch along its arguments\n login : -l, --login\n the shell places a dash at the\n beginning of 
the zeroth argument passed to command to simulate login\n shell.\n clean : -c, --clean\n causes command to be executed with an empty environment.\n name : -a, --name\n the shell passes name as the zeroth argument\n to the executed command.\n\n Notes\n -----\n This command **is not** the same as the Python builtin function\n exec(). That function is for running Python code. This command,\n which shares the same name as the sh-lang statement, is for launching\n a command directly in the same process. In the event of a name conflict,\n please use the xexec command directly or dive into subprocess mode\n explicitly with ![exec command]. For more details, please see\n http://xon.sh/faq.html#exec.\n \"\"\"\n if len(command) == 0:\n return (None, \"xonsh: exec: no command specified\\n\", 1)\n\n cmd = command[0]\n if name:\n command[0] = name\n if login:\n command[0] = f\"-{command[0]}\"\n\n denv = {}\n if not clean:\n denv = XSH.env.detype()\n\n # decrement $SHLVL to mirror bash's behaviour\n if \"SHLVL\" in denv:\n old_shlvl = to_shlvl(denv[\"SHLVL\"])\n denv[\"SHLVL\"] = str(adjust_shlvl(old_shlvl, -1))\n\n try:\n os.execvpe(cmd, command, denv)\n except FileNotFoundError as e:\n return (\n None,\n \"xonsh: exec: file not found: {}: {}\" \"\\n\".format(e.args[1], command[0]),\n 1,\n )\n\n\nxexec = ArgParserAlias(func=xexec_fn, has_args=True, prog=\"xexec\")\n\n\n@lazyobject\ndef xonfig():\n \"\"\"Runs the xonsh configuration utility.\"\"\"\n from xonsh.xonfig import xonfig_main # lazy import\n\n return xonfig_main\n\n\n@unthreadable\ndef trace(args, stdin=None, stdout=None, stderr=None, spec=None):\n \"\"\"Runs the xonsh tracer utility.\"\"\"\n from xonsh.tracer import tracermain # lazy import\n\n try:\n return tracermain(args, stdin=stdin, stdout=stdout, stderr=stderr, spec=spec)\n except SystemExit:\n pass\n\n\ndef showcmd(args, stdin=None):\n \"\"\"usage: showcmd [-h|--help|cmd args]\n\n Displays the command and arguments as a list of strings that xonsh would\n run in subprocess mode. 
This is useful for determining how xonsh evaluates\n your commands and arguments prior to running these commands.\n\n optional arguments:\n -h, --help show this help message and exit\n\n Examples\n --------\n >>> showcmd echo $USER \"can't\" hear \"the sea\"\n ['echo', 'I', \"can't\", 'hear', 'the sea']\n \"\"\"\n if len(args) == 0 or (len(args) == 1 and args[0] in {\"-h\", \"--help\"}):\n print(showcmd.__doc__.rstrip().replace(\"\\n \", \"\\n\"))\n else:\n sys.displayhook(args)\n\n\ndef detect_xpip_alias():\n \"\"\"\n Determines the correct invocation to get xonsh's pip\n \"\"\"\n if not getattr(sys, \"executable\", None):\n return lambda args, stdin=None: (\n \"\",\n \"Sorry, unable to run pip on your system (missing sys.executable)\",\n 1,\n )\n\n basecmd = [sys.executable, \"-m\", \"pip\"]\n try:\n if ON_WINDOWS or IN_APPIMAGE:\n # XXX: Does windows have an installation mode that requires UAC?\n return basecmd\n elif not os.access(os.path.dirname(sys.executable), os.W_OK):\n return [\"sudo\"] + basecmd\n else:\n return basecmd\n except Exception:\n # Something freaky happened, return something that'll probably work\n return basecmd\n\n\ndef make_default_aliases():\n \"\"\"Creates a new default aliases dictionary.\"\"\"\n default_aliases = {\n \"cd\": cd,\n \"pushd\": pushd,\n \"popd\": popd,\n \"dirs\": dirs,\n \"jobs\": jobs,\n \"fg\": fg,\n \"bg\": bg,\n \"disown\": disown,\n \"EOF\": xonsh_exit,\n \"exit\": xonsh_exit,\n \"quit\": xonsh_exit,\n \"exec\": xexec,\n \"xexec\": xexec,\n \"source\": source_alias,\n \"source-zsh\": ArgParserAlias(\n func=functools.partial(source_foreign_fn, \"zsh\", sourcer=\"source\"),\n has_args=True,\n prog=\"source-zsh\",\n ),\n \"source-bash\": ArgParserAlias(\n func=functools.partial(source_foreign_fn, \"bash\", sourcer=\"source\"),\n has_args=True,\n prog=\"source-bash\",\n ),\n \"source-cmd\": source_cmd,\n \"source-foreign\": source_foreign,\n \"history\": xhm.history_main,\n \"trace\": trace,\n \"timeit\": timeit_alias,\n \"xonfig\": xonfig,\n \"scp-resume\": [\"rsync\", \"--partial\", \"-h\", \"--progress\", \"--rsh=ssh\"],\n \"showcmd\": showcmd,\n \"ipynb\": [\"jupyter\", \"notebook\", \"--no-browser\"],\n \"which\": xxw.which,\n \"xontrib\": xontribs_main,\n \"completer\": xca.completer_alias,\n \"xpip\": detect_xpip_alias(),\n \"xonsh-reset\": xonsh_reset,\n }\n if ON_WINDOWS:\n # Borrow builtin commands from cmd.exe.\n windows_cmd_aliases = {\n \"cls\",\n \"copy\",\n \"del\",\n \"dir\",\n \"echo\",\n \"erase\",\n \"md\",\n \"mkdir\",\n \"mklink\",\n \"move\",\n \"rd\",\n \"ren\",\n \"rename\",\n \"rmdir\",\n \"time\",\n \"type\",\n \"vol\",\n }\n for alias in windows_cmd_aliases:\n default_aliases[alias] = [\"cmd\", \"/c\", alias]\n default_aliases[\"call\"] = [\"source-cmd\"]\n default_aliases[\"source-bat\"] = [\"source-cmd\"]\n default_aliases[\"clear\"] = \"cls\"\n if ON_ANACONDA:\n # Add aliases specific to the Anaconda python distribution.\n default_aliases[\"activate\"] = [\"source-cmd\", \"activate.bat\"]\n default_aliases[\"deactivate\"] = [\"source-cmd\", \"deactivate.bat\"]\n if not locate_binary(\"sudo\"):\n import xonsh.winutils as winutils\n\n def sudo(args):\n if len(args) < 1:\n print(\n \"You need to provide an executable to run as \" \"Administrator.\"\n )\n return\n cmd = args[0]\n if locate_binary(cmd):\n return winutils.sudo(cmd, args[1:])\n elif cmd.lower() in windows_cmd_aliases:\n args = [\"/D\", \"/C\", \"CD\", _get_cwd(), \"&&\"] + args\n return winutils.sudo(\"cmd\", args)\n else:\n msg = 'Cannot find the path 
for executable \"{0}\".'\n print(msg.format(cmd))\n\n default_aliases[\"sudo\"] = sudo\n elif ON_DARWIN:\n default_aliases[\"ls\"] = [\"ls\", \"-G\"]\n elif ON_FREEBSD or ON_DRAGONFLY:\n default_aliases[\"grep\"] = [\"grep\", \"--color=auto\"]\n default_aliases[\"egrep\"] = [\"egrep\", \"--color=auto\"]\n default_aliases[\"fgrep\"] = [\"fgrep\", \"--color=auto\"]\n default_aliases[\"ls\"] = [\"ls\", \"-G\"]\n elif ON_NETBSD:\n default_aliases[\"grep\"] = [\"grep\", \"--color=auto\"]\n default_aliases[\"egrep\"] = [\"egrep\", \"--color=auto\"]\n default_aliases[\"fgrep\"] = [\"fgrep\", \"--color=auto\"]\n elif ON_OPENBSD:\n pass\n else:\n default_aliases[\"grep\"] = [\"grep\", \"--color=auto\"]\n default_aliases[\"egrep\"] = [\"egrep\", \"--color=auto\"]\n default_aliases[\"fgrep\"] = [\"fgrep\", \"--color=auto\"]\n default_aliases[\"ls\"] = [\"ls\", \"--color=auto\", \"-v\"]\n return default_aliases\n",
"path": "xonsh/aliases.py"
}
] | [
{
"content": "\"\"\"Aliases for the xonsh shell.\"\"\"\nimport argparse\nimport collections.abc as cabc\nimport functools\nimport inspect\nimport os\nimport re\nimport sys\nimport types\nimport typing as tp\n\nimport xonsh.completers._aliases as xca\nimport xonsh.history.main as xhm\nimport xonsh.xoreutils.which as xxw\nfrom xonsh.ast import isexpression\nfrom xonsh.built_ins import XSH\nfrom xonsh.cli_utils import Annotated, Arg, ArgParserAlias\nfrom xonsh.dirstack import _get_cwd, cd, dirs, popd, pushd\nfrom xonsh.environ import locate_binary, make_args_env\nfrom xonsh.foreign_shells import foreign_shell_data\nfrom xonsh.jobs import bg, clean_jobs, disown, fg, jobs\nfrom xonsh.lazyasd import lazyobject\nfrom xonsh.platform import (\n IN_APPIMAGE,\n ON_ANACONDA,\n ON_DARWIN,\n ON_DRAGONFLY,\n ON_FREEBSD,\n ON_NETBSD,\n ON_OPENBSD,\n ON_WINDOWS,\n)\nfrom xonsh.timings import timeit_alias\nfrom xonsh.tools import (\n ALIAS_KWARG_NAMES,\n XonshError,\n adjust_shlvl,\n argvquote,\n escape_windows_cmd_string,\n print_color,\n strip_simple_quotes,\n swap_values,\n to_repr_pretty_,\n to_shlvl,\n unthreadable,\n)\nfrom xonsh.xontribs import xontribs_main\n\n\n@lazyobject\ndef EXEC_ALIAS_RE():\n return re.compile(r\"@\\(|\\$\\(|!\\(|\\$\\[|!\\[|\\&\\&|\\|\\||\\s+and\\s+|\\s+or\\s+|[>|<]\")\n\n\nclass Aliases(cabc.MutableMapping):\n \"\"\"Represents a location to hold and look up aliases.\"\"\"\n\n def __init__(self, *args, **kwargs):\n self._raw = {}\n self.update(*args, **kwargs)\n\n @staticmethod\n def _get_func_name(func):\n name = func.__name__\n\n # Strip leading underscore\n if name.startswith(\"_\"):\n name = name[1:]\n return name\n\n def _register(self, func, name=\"\", dash_case=True):\n name = name or self._get_func_name(func)\n\n if dash_case:\n name = name.replace(\"_\", \"-\")\n\n self[name] = func\n return func\n\n @tp.overload\n def register(self, func: types.FunctionType) -> types.FunctionType:\n \"\"\"simple usage\"\"\"\n\n @tp.overload\n def register(\n self, name: str, *, dash_case: bool = True\n ) -> tp.Callable[[types.FunctionType], types.FunctionType]:\n ...\n\n def register(self, func_or_name, name=None, dash_case=True):\n \"\"\"Decorator to register the given function by name.\"\"\"\n\n if isinstance(func_or_name, types.FunctionType):\n return self._register(func_or_name, name, dash_case)\n\n def wrapper(func):\n return self._register(func, func_or_name, dash_case)\n\n return wrapper\n\n def get(self, key, default=None):\n \"\"\"Returns the (possibly modified) value. If the key is not present,\n then `default` is returned.\n If the value is callable, it is returned without modification. If it\n is an iterable of strings it will be evaluated recursively to expand\n other aliases, resulting in a new list or a \"partially applied\"\n callable.\n \"\"\"\n val = self._raw.get(key)\n if val is None:\n return default\n elif isinstance(val, cabc.Iterable) or callable(val):\n return self.eval_alias(val, seen_tokens={key})\n else:\n msg = \"alias of {!r} has an inappropriate type: {!r}\"\n raise TypeError(msg.format(key, val))\n\n def eval_alias(self, value, seen_tokens=frozenset(), acc_args=()):\n \"\"\"\n \"Evaluates\" the alias ``value``, by recursively looking up the leftmost\n token and \"expanding\" if it's also an alias.\n\n A value like ``[\"cmd\", \"arg\"]`` might transform like this:\n ``> [\"cmd\", \"arg\"] -> [\"ls\", \"-al\", \"arg\"] -> callable()``\n where ``cmd=ls -al`` and ``ls`` is an alias with its value being a\n callable. 
The resulting callable will be \"partially applied\" with\n ``[\"-al\", \"arg\"]``.\n \"\"\"\n # Beware of mutability: default values for keyword args are evaluated\n # only once.\n if callable(value):\n return partial_eval_alias(value, acc_args=acc_args)\n else:\n expand_path = XSH.expand_path\n token, *rest = map(expand_path, value)\n if token in seen_tokens or token not in self._raw:\n # ^ Making sure things like `egrep=egrep --color=auto` works,\n # and that `l` evals to `ls --color=auto -CF` if `l=ls -CF`\n # and `ls=ls --color=auto`\n rtn = [token]\n rtn.extend(rest)\n rtn.extend(acc_args)\n return rtn\n else:\n seen_tokens = seen_tokens | {token}\n acc_args = rest + list(acc_args)\n return self.eval_alias(self._raw[token], seen_tokens, acc_args)\n\n def expand_alias(self, line: str, cursor_index: int) -> str:\n \"\"\"Expands any aliases present in line if alias does not point to a\n builtin function and if alias is only a single command.\n The command won't be expanded if the cursor's inside/behind it.\n \"\"\"\n word = (line.split(maxsplit=1) or [\"\"])[0]\n if word in XSH.aliases and isinstance(self.get(word), cabc.Sequence): # type: ignore\n word_idx = line.find(word)\n word_edge = word_idx + len(word)\n if cursor_index > word_edge:\n # the cursor isn't inside/behind the word\n expansion = \" \".join(self.get(word))\n line = line[:word_idx] + expansion + line[word_edge:]\n return line\n\n #\n # Mutable mapping interface\n #\n\n def __getitem__(self, key):\n return self._raw[key]\n\n def __setitem__(self, key, val):\n if isinstance(val, str):\n f = \"<exec-alias:\" + key + \">\"\n if EXEC_ALIAS_RE.search(val) is not None:\n # We have a sub-command (e.g. $(cmd)) or IO redirect (e.g. >>)\n self._raw[key] = ExecAlias(val, filename=f)\n elif isexpression(val):\n # expansion substitution\n lexer = XSH.execer.parser.lexer\n self._raw[key] = list(map(strip_simple_quotes, lexer.split(val)))\n else:\n # need to exec alias\n self._raw[key] = ExecAlias(val, filename=f)\n else:\n self._raw[key] = val\n\n def _common_or(self, other):\n new_dict = self._raw.copy()\n for key in dict(other):\n new_dict[key] = other[key]\n return Aliases(new_dict)\n\n def __or__(self, other):\n return self._common_or(other)\n\n def __ror__(self, other):\n return self._common_or(other)\n\n def __ior__(self, other):\n for key in dict(other):\n self[key] = other[key]\n return self\n\n def __delitem__(self, key):\n del self._raw[key]\n\n def update(self, *args, **kwargs):\n for key, val in dict(*args, **kwargs).items():\n self[key] = val\n\n def __iter__(self):\n yield from self._raw\n\n def __len__(self):\n return len(self._raw)\n\n def __str__(self):\n return str(self._raw)\n\n def __repr__(self):\n return \"{}.{}({})\".format(\n self.__class__.__module__, self.__class__.__name__, self._raw\n )\n\n _repr_pretty_ = to_repr_pretty_\n\n\nclass ExecAlias:\n \"\"\"Provides a callable alias for xonsh source code.\"\"\"\n\n def __init__(self, src, filename=\"<exec-alias>\"):\n \"\"\"\n Parameters\n ----------\n src : str\n Source code that will be\n \"\"\"\n self.src = src\n self.filename = filename\n\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n execer = XSH.execer\n frame = stack[0][0] # execute as though we are at the call site\n\n alias_args = {\"args\": args}\n for i, a in enumerate(args):\n alias_args[f\"arg{i}\"] = a\n\n with XSH.env.swap(alias_args):\n execer.exec(\n self.src,\n glbs=frame.f_globals,\n locs=frame.f_locals,\n filename=self.filename,\n )\n if 
XSH.history is not None:\n return XSH.history.last_cmd_rtn\n\n def __repr__(self):\n return f\"ExecAlias({self.src!r}, filename={self.filename!r})\"\n\n\nclass PartialEvalAliasBase:\n \"\"\"Partially evaluated alias.\"\"\"\n\n def __init__(self, f, acc_args=()):\n \"\"\"\n Parameters\n ----------\n f : callable\n A function to dispatch to.\n acc_args : sequence of strings, optional\n Additional arguments to prepent to the argument list passed in\n when the alias is called.\n \"\"\"\n self.f = f\n self.acc_args = acc_args\n self.__name__ = getattr(f, \"__name__\", self.__class__.__name__)\n\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n return self.f(args, stdin, stdout, stderr, spec, stack)\n\n def __repr__(self):\n return \"{name}({f!r}, acc_args={acc_args!r})\".format(\n name=self.__class__.__name__, f=self.f, acc_args=self.acc_args\n )\n\n\nclass PartialEvalAlias0(PartialEvalAliasBase):\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n if args:\n msg = \"callable alias {f!r} takes no arguments, but {args!f} provided. \"\n msg += \"Of these {acc_args!r} were partially applied.\"\n raise XonshError(msg.format(f=self.f, args=args, acc_args=self.acc_args))\n return self.f()\n\n\nclass PartialEvalAlias1(PartialEvalAliasBase):\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n return self.f(args)\n\n\nclass PartialEvalAlias2(PartialEvalAliasBase):\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n return self.f(args, stdin)\n\n\nclass PartialEvalAlias3(PartialEvalAliasBase):\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n return self.f(args, stdin, stdout)\n\n\nclass PartialEvalAlias4(PartialEvalAliasBase):\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n return self.f(args, stdin, stdout, stderr)\n\n\nclass PartialEvalAlias5(PartialEvalAliasBase):\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n return self.f(args, stdin, stdout, stderr, spec)\n\n\nclass PartialEvalAlias6(PartialEvalAliasBase):\n def __call__(\n self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None\n ):\n args = list(self.acc_args) + args\n return self.f(args, stdin, stdout, stderr, spec, stack)\n\n\nPARTIAL_EVAL_ALIASES = (\n PartialEvalAlias0,\n PartialEvalAlias1,\n PartialEvalAlias2,\n PartialEvalAlias3,\n PartialEvalAlias4,\n PartialEvalAlias5,\n PartialEvalAlias6,\n)\n\n\ndef partial_eval_alias(f, acc_args=()):\n \"\"\"Dispatches the appropriate eval alias based on the number of args to the original callable alias\n and how many arguments to apply.\n \"\"\"\n # no partial needed if no extra args\n if not acc_args:\n return f\n # need to dispatch\n numargs = 0\n for name, param in inspect.signature(f).parameters.items():\n if (\n param.kind == param.POSITIONAL_ONLY\n or param.kind == param.POSITIONAL_OR_KEYWORD\n ):\n numargs += 1\n elif name in ALIAS_KWARG_NAMES and param.kind == param.KEYWORD_ONLY:\n numargs += 1\n if numargs < 7:\n return PARTIAL_EVAL_ALIASES[numargs](f, acc_args=acc_args)\n else:\n e = \"Expected proxy with 
6 or fewer arguments for {}, not {}\"\n raise XonshError(e.format(\", \".join(ALIAS_KWARG_NAMES), numargs))\n\n\n#\n# Actual aliases below\n#\n\n\ndef xonsh_exit(args, stdin=None):\n \"\"\"Sends signal to exit shell.\"\"\"\n if not clean_jobs():\n # Do not exit if jobs not cleaned up\n return None, None\n XSH.exit = True\n print() # gimme a newline\n return None, None\n\n\ndef xonsh_reset(args, stdin=None):\n \"\"\"Clears __xonsh__.ctx\"\"\"\n XSH.ctx.clear()\n\n\ndef source_foreign_fn(\n shell: str,\n files_or_code: Annotated[tp.List[str], Arg(nargs=\"+\")],\n interactive=True,\n login=False,\n envcmd=None,\n aliascmd=None,\n extra_args=\"\",\n safe=True,\n prevcmd=\"\",\n postcmd=\"\",\n funcscmd=\"\",\n sourcer=None,\n use_tmpfile=False,\n seterrprevcmd=None,\n seterrpostcmd=None,\n overwrite_aliases=False,\n suppress_skip_message=False,\n show=False,\n dryrun=False,\n _stderr=None,\n):\n \"\"\"Sources a file written in a foreign shell language.\n\n Parameters\n ----------\n shell\n Name or path to the foreign shell\n files_or_code\n file paths to source or code in the target language.\n interactive : -n, --non-interactive\n whether the sourced shell should be interactive\n login : -l, --login\n whether the sourced shell should be login\n envcmd : --envcmd\n command to print environment\n aliascmd : --aliascmd\n command to print aliases\n extra_args : --extra-args\n extra arguments needed to run the shell\n safe : -u, --unsafe\n whether the source shell should be run safely, and not raise any errors, even if they occur.\n prevcmd : -p, --prevcmd\n command(s) to run before any other commands, replaces traditional source.\n postcmd : --postcmd\n command(s) to run after all other commands\n funcscmd : --funcscmd\n code to find locations of all native functions in the shell language.\n sourcer : --sourcer\n the source command in the target shell language.\n If this is not set, a default value will attempt to be\n looked up based on the shell name.\n use_tmpfile : --use-tmpfile\n whether the commands for source shell should be written to a temporary file.\n seterrprevcmd : --seterrprevcmd\n command(s) to set exit-on-error before any other commands.\n seterrpostcmd : --seterrpostcmd\n command(s) to set exit-on-error after all other commands.\n overwrite_aliases : --overwrite-aliases\n flag for whether or not sourced aliases should replace the current xonsh aliases.\n suppress_skip_message : --suppress-skip-message\n flag for whether or not skip messages should be suppressed.\n show : --show\n show the script output.\n dryrun : -d, --dry-run\n Will not actually source the file.\n \"\"\"\n extra_args = tuple(extra_args.split())\n env = XSH.env\n suppress_skip_message = (\n env.get(\"FOREIGN_ALIASES_SUPPRESS_SKIP_MESSAGE\")\n if not suppress_skip_message\n else suppress_skip_message\n )\n files: tp.Tuple[str, ...] 
= ()\n if prevcmd:\n pass # don't change prevcmd if given explicitly\n elif os.path.isfile(files_or_code[0]):\n if not sourcer:\n return (None, \"xonsh: error: `sourcer` command is not mentioned.\\n\", 1)\n # we have filenames to source\n prevcmd = \"\".join([f\"{sourcer} {f}\\n\" for f in files_or_code])\n files = tuple(files_or_code)\n elif not prevcmd:\n prevcmd = \" \".join(files_or_code) # code to run, no files\n foreign_shell_data.cache_clear() # make sure that we don't get prev src\n fsenv, fsaliases = foreign_shell_data(\n shell=shell,\n login=login,\n interactive=interactive,\n envcmd=envcmd,\n aliascmd=aliascmd,\n extra_args=extra_args,\n safe=safe,\n prevcmd=prevcmd,\n postcmd=postcmd,\n funcscmd=funcscmd or None, # the default is None in the called function\n sourcer=sourcer,\n use_tmpfile=use_tmpfile,\n seterrprevcmd=seterrprevcmd,\n seterrpostcmd=seterrpostcmd,\n show=show,\n dryrun=dryrun,\n files=files,\n )\n if fsenv is None:\n if dryrun:\n return\n else:\n msg = f\"xonsh: error: Source failed: {prevcmd!r}\\n\"\n msg += \"xonsh: error: Possible reasons: File not found or syntax error\\n\"\n return (None, msg, 1)\n # apply results\n denv = env.detype()\n for k, v in fsenv.items():\n if k == \"SHLVL\": # ignore $SHLVL as sourcing should not change $SHLVL\n continue\n if k in denv and v == denv[k]:\n continue # no change from original\n env[k] = v\n # Remove any env-vars that were unset by the script.\n for k in denv:\n if k not in fsenv:\n env.pop(k, None)\n # Update aliases\n baliases = XSH.aliases\n for k, v in fsaliases.items():\n if k in baliases and v == baliases[k]:\n continue # no change from original\n elif overwrite_aliases or k not in baliases:\n baliases[k] = v\n elif suppress_skip_message:\n pass\n else:\n msg = (\n \"Skipping application of {0!r} alias from {1!r} \"\n \"since it shares a name with an existing xonsh alias. \"\n 'Use \"--overwrite-alias\" option to apply it anyway.'\n 'You may prevent this message with \"--suppress-skip-message\" or '\n '\"$FOREIGN_ALIASES_SUPPRESS_SKIP_MESSAGE = True\".'\n )\n print(msg.format(k, shell), file=_stderr)\n\n\nsource_foreign = ArgParserAlias(\n func=source_foreign_fn, has_args=True, prog=\"source-foreign\"\n)\n\n\n@unthreadable\ndef source_alias(args, stdin=None):\n \"\"\"Executes the contents of the provided files in the current context.\n If sourced file isn't found in cwd, search for file along $PATH to source\n instead.\n \"\"\"\n env = XSH.env\n encoding = env.get(\"XONSH_ENCODING\")\n errors = env.get(\"XONSH_ENCODING_ERRORS\")\n for i, fname in enumerate(args):\n fpath = fname\n if not os.path.isfile(fpath):\n fpath = locate_binary(fname)\n if fpath is None:\n if env.get(\"XONSH_DEBUG\"):\n print(f\"source: {fname}: No such file\", file=sys.stderr)\n if i == 0:\n raise RuntimeError(\n \"must source at least one file, \" + fname + \" does not exist.\"\n )\n break\n _, fext = os.path.splitext(fpath)\n if fext and fext != \".xsh\" and fext != \".py\":\n raise RuntimeError(\n \"attempting to source non-xonsh file! If you are \"\n \"trying to source a file in another language, \"\n \"then please use the appropriate source command. 
\"\n \"For example, source-bash script.sh\"\n )\n with open(fpath, encoding=encoding, errors=errors) as fp:\n src = fp.read()\n if not src.endswith(\"\\n\"):\n src += \"\\n\"\n ctx = XSH.ctx\n updates = {\"__file__\": fpath, \"__name__\": os.path.abspath(fpath)}\n with env.swap(**make_args_env(args[i + 1 :])), swap_values(ctx, updates):\n try:\n XSH.builtins.execx(src, \"exec\", ctx, filename=fpath)\n except Exception:\n print_color(\n \"{RED}You may be attempting to source non-xonsh file! \"\n \"{RESET}If you are trying to source a file in \"\n \"another language, then please use the appropriate \"\n \"source command. For example, {GREEN}source-bash \"\n \"script.sh{RESET}\",\n file=sys.stderr,\n )\n raise\n\n\ndef source_cmd_fn(\n files: Annotated[tp.List[str], Arg(nargs=\"+\")],\n login=False,\n aliascmd=None,\n extra_args=\"\",\n safe=True,\n postcmd=\"\",\n funcscmd=\"\",\n seterrprevcmd=None,\n overwrite_aliases=False,\n suppress_skip_message=False,\n show=False,\n dryrun=False,\n _stderr=None,\n):\n \"\"\"\n Source cmd.exe files\n\n Parameters\n ----------\n files\n paths to source files.\n login : -l, --login\n whether the sourced shell should be login\n envcmd : --envcmd\n command to print environment\n aliascmd : --aliascmd\n command to print aliases\n extra_args : --extra-args\n extra arguments needed to run the shell\n safe : -s, --safe\n whether the source shell should be run safely, and not raise any errors, even if they occur.\n postcmd : --postcmd\n command(s) to run after all other commands\n funcscmd : --funcscmd\n code to find locations of all native functions in the shell language.\n seterrprevcmd : --seterrprevcmd\n command(s) to set exit-on-error before any other commands.\n overwrite_aliases : --overwrite-aliases\n flag for whether or not sourced aliases should replace the current xonsh aliases.\n suppress_skip_message : --suppress-skip-message\n flag for whether or not skip messages should be suppressed.\n show : --show\n show the script output.\n dryrun : -d, --dry-run\n Will not actually source the file.\n \"\"\"\n args = list(files)\n fpath = locate_binary(args[0])\n args[0] = fpath if fpath else args[0]\n if not os.path.isfile(args[0]):\n return (None, f\"xonsh: error: File not found: {args[0]}\\n\", 1)\n prevcmd = \"call \"\n prevcmd += \" \".join([argvquote(arg, force=True) for arg in args])\n prevcmd = escape_windows_cmd_string(prevcmd)\n with XSH.env.swap(PROMPT=\"$P$G\"):\n return source_foreign_fn(\n shell=\"cmd\",\n files_or_code=args,\n interactive=True,\n sourcer=\"call\",\n envcmd=\"set\",\n seterrpostcmd=\"if errorlevel 1 exit 1\",\n use_tmpfile=True,\n prevcmd=prevcmd,\n # from this function\n login=login,\n aliascmd=aliascmd,\n extra_args=extra_args,\n safe=safe,\n postcmd=postcmd,\n funcscmd=funcscmd,\n seterrprevcmd=seterrprevcmd,\n overwrite_aliases=overwrite_aliases,\n suppress_skip_message=suppress_skip_message,\n show=show,\n dryrun=dryrun,\n )\n\n\nsource_cmd = ArgParserAlias(func=source_cmd_fn, has_args=True, prog=\"source-cmd\")\n\n\ndef xexec_fn(\n command: Annotated[tp.List[str], Arg(nargs=argparse.REMAINDER)],\n login=False,\n clean=False,\n name=\"\",\n _stdin=None,\n):\n \"\"\"exec (also aliased as xexec) uses the os.execvpe() function to\n replace the xonsh process with the specified program.\n\n This provides the functionality of the bash 'exec' builtin::\n\n >>> exec bash -l -i\n bash $\n\n Parameters\n ----------\n command\n program to launch along its arguments\n login : -l, --login\n the shell places a dash at the\n beginning of 
the zeroth argument passed to command to simulate login\n shell.\n clean : -c, --clean\n causes command to be executed with an empty environment.\n name : -a, --name\n the shell passes name as the zeroth argument\n to the executed command.\n\n Notes\n -----\n This command **is not** the same as the Python builtin function\n exec(). That function is for running Python code. This command,\n which shares the same name as the sh-lang statement, is for launching\n a command directly in the same process. In the event of a name conflict,\n please use the xexec command directly or dive into subprocess mode\n explicitly with ![exec command]. For more details, please see\n http://xon.sh/faq.html#exec.\n \"\"\"\n if len(command) == 0:\n return (None, \"xonsh: exec: no command specified\\n\", 1)\n\n cmd = command[0]\n if name:\n command[0] = name\n if login:\n command[0] = f\"-{command[0]}\"\n\n denv = {}\n if not clean:\n denv = XSH.env.detype()\n\n # decrement $SHLVL to mirror bash's behaviour\n if \"SHLVL\" in denv:\n old_shlvl = to_shlvl(denv[\"SHLVL\"])\n denv[\"SHLVL\"] = str(adjust_shlvl(old_shlvl, -1))\n\n try:\n os.execvpe(cmd, command, denv)\n except FileNotFoundError as e:\n return (\n None,\n \"xonsh: exec: file not found: {}: {}\" \"\\n\".format(e.args[1], command[0]),\n 1,\n )\n\n\nxexec = ArgParserAlias(func=xexec_fn, has_args=True, prog=\"xexec\")\n\n\n@lazyobject\ndef xonfig():\n \"\"\"Runs the xonsh configuration utility.\"\"\"\n from xonsh.xonfig import xonfig_main # lazy import\n\n return xonfig_main\n\n\n@unthreadable\ndef trace(args, stdin=None, stdout=None, stderr=None, spec=None):\n \"\"\"Runs the xonsh tracer utility.\"\"\"\n from xonsh.tracer import tracermain # lazy import\n\n try:\n return tracermain(args, stdin=stdin, stdout=stdout, stderr=stderr, spec=spec)\n except SystemExit:\n pass\n\n\ndef showcmd(args, stdin=None):\n \"\"\"usage: showcmd [-h|--help|cmd args]\n\n Displays the command and arguments as a list of strings that xonsh would\n run in subprocess mode. 
This is useful for determining how xonsh evaluates\n your commands and arguments prior to running these commands.\n\n optional arguments:\n -h, --help show this help message and exit\n\n Examples\n --------\n >>> showcmd echo $USER \"can't\" hear \"the sea\"\n ['echo', 'I', \"can't\", 'hear', 'the sea']\n \"\"\"\n if len(args) == 0 or (len(args) == 1 and args[0] in {\"-h\", \"--help\"}):\n print(showcmd.__doc__.rstrip().replace(\"\\n \", \"\\n\"))\n else:\n sys.displayhook(args)\n\n\ndef detect_xpip_alias():\n \"\"\"\n Determines the correct invocation to get xonsh's pip\n \"\"\"\n if not getattr(sys, \"executable\", None):\n return lambda args, stdin=None: (\n \"\",\n \"Sorry, unable to run pip on your system (missing sys.executable)\",\n 1,\n )\n\n basecmd = [sys.executable, \"-m\", \"pip\"]\n try:\n if ON_WINDOWS or IN_APPIMAGE:\n # XXX: Does windows have an installation mode that requires UAC?\n return basecmd\n elif not os.access(os.path.dirname(sys.executable), os.W_OK):\n return basecmd.extend([\"--user\"])\n else:\n return basecmd\n except Exception:\n # Something freaky happened, return something that'll probably work\n return basecmd\n\n\ndef make_default_aliases():\n \"\"\"Creates a new default aliases dictionary.\"\"\"\n default_aliases = {\n \"cd\": cd,\n \"pushd\": pushd,\n \"popd\": popd,\n \"dirs\": dirs,\n \"jobs\": jobs,\n \"fg\": fg,\n \"bg\": bg,\n \"disown\": disown,\n \"EOF\": xonsh_exit,\n \"exit\": xonsh_exit,\n \"quit\": xonsh_exit,\n \"exec\": xexec,\n \"xexec\": xexec,\n \"source\": source_alias,\n \"source-zsh\": ArgParserAlias(\n func=functools.partial(source_foreign_fn, \"zsh\", sourcer=\"source\"),\n has_args=True,\n prog=\"source-zsh\",\n ),\n \"source-bash\": ArgParserAlias(\n func=functools.partial(source_foreign_fn, \"bash\", sourcer=\"source\"),\n has_args=True,\n prog=\"source-bash\",\n ),\n \"source-cmd\": source_cmd,\n \"source-foreign\": source_foreign,\n \"history\": xhm.history_main,\n \"trace\": trace,\n \"timeit\": timeit_alias,\n \"xonfig\": xonfig,\n \"scp-resume\": [\"rsync\", \"--partial\", \"-h\", \"--progress\", \"--rsh=ssh\"],\n \"showcmd\": showcmd,\n \"ipynb\": [\"jupyter\", \"notebook\", \"--no-browser\"],\n \"which\": xxw.which,\n \"xontrib\": xontribs_main,\n \"completer\": xca.completer_alias,\n \"xpip\": detect_xpip_alias(),\n \"xonsh-reset\": xonsh_reset,\n }\n if ON_WINDOWS:\n # Borrow builtin commands from cmd.exe.\n windows_cmd_aliases = {\n \"cls\",\n \"copy\",\n \"del\",\n \"dir\",\n \"echo\",\n \"erase\",\n \"md\",\n \"mkdir\",\n \"mklink\",\n \"move\",\n \"rd\",\n \"ren\",\n \"rename\",\n \"rmdir\",\n \"time\",\n \"type\",\n \"vol\",\n }\n for alias in windows_cmd_aliases:\n default_aliases[alias] = [\"cmd\", \"/c\", alias]\n default_aliases[\"call\"] = [\"source-cmd\"]\n default_aliases[\"source-bat\"] = [\"source-cmd\"]\n default_aliases[\"clear\"] = \"cls\"\n if ON_ANACONDA:\n # Add aliases specific to the Anaconda python distribution.\n default_aliases[\"activate\"] = [\"source-cmd\", \"activate.bat\"]\n default_aliases[\"deactivate\"] = [\"source-cmd\", \"deactivate.bat\"]\n if not locate_binary(\"sudo\"):\n import xonsh.winutils as winutils\n\n def sudo(args):\n if len(args) < 1:\n print(\n \"You need to provide an executable to run as \" \"Administrator.\"\n )\n return\n cmd = args[0]\n if locate_binary(cmd):\n return winutils.sudo(cmd, args[1:])\n elif cmd.lower() in windows_cmd_aliases:\n args = [\"/D\", \"/C\", \"CD\", _get_cwd(), \"&&\"] + args\n return winutils.sudo(\"cmd\", args)\n else:\n msg = 'Cannot find the 
path for executable \"{0}\".'\n print(msg.format(cmd))\n\n default_aliases[\"sudo\"] = sudo\n elif ON_DARWIN:\n default_aliases[\"ls\"] = [\"ls\", \"-G\"]\n elif ON_FREEBSD or ON_DRAGONFLY:\n default_aliases[\"grep\"] = [\"grep\", \"--color=auto\"]\n default_aliases[\"egrep\"] = [\"egrep\", \"--color=auto\"]\n default_aliases[\"fgrep\"] = [\"fgrep\", \"--color=auto\"]\n default_aliases[\"ls\"] = [\"ls\", \"-G\"]\n elif ON_NETBSD:\n default_aliases[\"grep\"] = [\"grep\", \"--color=auto\"]\n default_aliases[\"egrep\"] = [\"egrep\", \"--color=auto\"]\n default_aliases[\"fgrep\"] = [\"fgrep\", \"--color=auto\"]\n elif ON_OPENBSD:\n pass\n else:\n default_aliases[\"grep\"] = [\"grep\", \"--color=auto\"]\n default_aliases[\"egrep\"] = [\"egrep\", \"--color=auto\"]\n default_aliases[\"fgrep\"] = [\"fgrep\", \"--color=auto\"]\n default_aliases[\"ls\"] = [\"ls\", \"--color=auto\", \"-v\"]\n return default_aliases\n",
"path": "xonsh/aliases.py"
}
] | diff --git a/news/xpip_perms.rst b/news/xpip_perms.rst
new file mode 100644
index 0000000000..9921f44628
--- /dev/null
+++ b/news/xpip_perms.rst
@@ -0,0 +1,24 @@
+**Added:**
+
+* <news item>
+
+**Changed:**
+
+* <news item>
+
+**Deprecated:**
+
+* <news item>
+
+**Removed:**
+
+* <news item>
+
+**Fixed:**
+
+* <news item>
+
+**Security:**
+
+* ``xpip`` will never add ``sudo`` under any circumstances and will instead append ``--user`` as needed
+
diff --git a/xonsh/aliases.py b/xonsh/aliases.py
index 913af0a9ff..3fc50a17d9 100644
--- a/xonsh/aliases.py
+++ b/xonsh/aliases.py
@@ -815,7 +815,7 @@ def detect_xpip_alias():
# XXX: Does windows have an installation mode that requires UAC?
return basecmd
elif not os.access(os.path.dirname(sys.executable), os.W_OK):
- return ["sudo"] + basecmd
+ return basecmd.extend(["--user"])
else:
return basecmd
except Exception:
| xpip doesn't detect/support "pip install --user" installs of xonsh
## xonfig
<details>
```
+------------------+----------------------+
| xonsh | 0.9.27 |
| Git SHA | 71fe9014 |
| Commit Date | Jan 29 08:58:58 2021 |
| Python | 3.9.5 |
| PLY | 3.11 |
| have readline | True |
| prompt toolkit | 3.0.19 |
| shell type | prompt_toolkit |
| pygments | 2.9.0 |
| on posix | True |
| on linux | True |
| distro | ubuntu |
| on darwin | False |
| on windows | False |
| on cygwin | False |
| on msys2 | False |
| is superuser | False |
| default encoding | utf-8 |
| xonsh encoding | utf-8 |
| encoding errors | surrogateescape |
| on jupyter | False |
| jupyter kernel | None |
| xontrib 1 | apt_tabcomplete |
| xontrib 2 | direnv |
| xontrib 3 | kitty |
| xontrib 4 | linuxbrew |
+------------------+----------------------+
```
</details>
## Expected Behavior
After installing xonsh via `pip3 install --user xonsh` (and ensuring that `~/.local/bin` is on `$PATH`, etc.), xonsh works and runs just fine. Since `xpip` is supposed to manage the Python environment where xonsh itself is installed, I would expect it to wrap a non-root `pip`, ideally invoked so that it installs into the user's directory.
## Current Behavior
```
$ which xpip
sudo /usr/bin/python3 -m pip
```
Instead, `xpip` wraps a `sudo` invocation that will install things globally, system-wide, which is not at all how xonsh itself was installed. And if the user tries to do something "smart" like `xpip install --user xontrib-whatever`, I'm not sure quite what it will do, but surely nothing good.
## Steps to Reproduce
1. Install xonsh via `pip3 install --user xonsh`
2. Run `xpip` to install something like a xontrib
3. Sadness and an unexpected `sudo` that might do undesired things to your system
## For community
⬇️ **Please click the 👍 reaction instead of leaving a `+1` or 👍 comment**
|
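The intent of the `xpip` change in this record can be illustrated with a small standalone sketch. This is not the actual xonsh implementation: the function name and the reduced check are illustrative, keeping only the writability test discussed in the issue. One detail worth noting for this kind of fallback is that `list.extend()` mutates in place and returns `None`, so the `--user` variant should be built by concatenation rather than by returning the result of `extend()`.

```python
import os
import sys


def detect_pip_invocation():
    """Sketch: choose a pip invocation that matches how the interpreter
    was installed, preferring a per-user install over sudo."""
    if not getattr(sys, "executable", None):
        # No usable interpreter path; there is nothing sensible to wrap.
        return None

    basecmd = [sys.executable, "-m", "pip"]
    try:
        if os.access(os.path.dirname(sys.executable), os.W_OK):
            # The interpreter's directory is writable (e.g. a virtualenv
            # or a user-owned install), so plain pip is enough.
            return basecmd
        # Not writable: fall back to a per-user install instead of sudo.
        # Build a new list; list.extend() returns None.
        return basecmd + ["--user"]
    except Exception:
        # If the check itself fails, return something that will probably work.
        return basecmd
```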
pypi__warehouse-8550 | [
{
"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import OrderedDict\n\nfrom pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound\nfrom pyramid.view import view_config\nfrom sqlalchemy.orm import Load\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom warehouse.cache.http import cache_control\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.models import File, Project, Release\n\n# Generate appropriate CORS headers for the JSON endpoint.\n# We want to allow Cross-Origin requests here so that users can interact\n# with these endpoints via XHR/Fetch APIs in the browser.\n_CORS_HEADERS = {\n \"Access-Control-Allow-Origin\": \"*\",\n \"Access-Control-Allow-Headers\": \", \".join(\n [\n \"Content-Type\",\n \"If-Match\",\n \"If-Modified-Since\",\n \"If-None-Match\",\n \"If-Unmodified-Since\",\n ]\n ),\n \"Access-Control-Allow-Methods\": \"GET\",\n \"Access-Control-Max-Age\": \"86400\", # 1 day.\n \"Access-Control-Expose-Headers\": \", \".join([\"X-PyPI-Last-Serial\"]),\n}\n\n_CACHE_DECORATOR = [\n cache_control(15 * 60), # 15 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n]\n\n\n@view_config(\n route_name=\"legacy.api.json.project\",\n context=Project,\n renderer=\"json\",\n decorator=_CACHE_DECORATOR,\n)\ndef json_project(project, request):\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name), headers=_CORS_HEADERS\n )\n\n try:\n release = (\n request.db.query(Release)\n .filter(Release.project == project, Release.yanked.is_(False))\n .order_by(Release.is_prerelease.nullslast(), Release._pypi_ordering.desc())\n .limit(1)\n .one()\n )\n except NoResultFound:\n return HTTPNotFound(headers=_CORS_HEADERS)\n\n return json_release(release, request)\n\n\n@view_config(\n route_name=\"legacy.api.json.project_slash\",\n context=Project,\n decorator=_CACHE_DECORATOR,\n)\ndef json_project_slash(project, request):\n return HTTPMovedPermanently(\n # Respond with redirect to url without trailing slash\n request.route_path(\"legacy.api.json.project\", name=project.name),\n headers=_CORS_HEADERS,\n )\n\n\n@view_config(\n route_name=\"legacy.api.json.release\",\n context=Release,\n renderer=\"json\",\n decorator=_CACHE_DECORATOR,\n)\ndef json_release(release, request):\n project = release.project\n\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name), headers=_CORS_HEADERS\n )\n\n # Apply CORS headers.\n request.response.headers.update(_CORS_HEADERS)\n\n # Get the latest serial number for this project.\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(project.last_serial)\n\n # Get all of the releases and files for this project.\n release_files = (\n request.db.query(Release, File)\n .options(\n Load(Release).load_only(\n \"version\", \"requires_python\", 
\"yanked\", \"yanked_reason\"\n )\n )\n .outerjoin(File)\n .filter(Release.project == project)\n .order_by(Release._pypi_ordering.desc(), File.filename)\n .all()\n )\n\n # Map our releases + files into a dictionary that maps each release to a\n # list of all its files.\n releases = {}\n for r, file_ in release_files:\n files = releases.setdefault(r, [])\n if file_ is not None:\n files.append(file_)\n\n # Serialize our database objects to match the way that PyPI legacy\n # presented this data.\n releases = {\n r.version: [\n {\n \"filename\": f.filename,\n \"packagetype\": f.packagetype,\n \"python_version\": f.python_version,\n \"has_sig\": f.has_signature,\n \"comment_text\": f.comment_text,\n \"md5_digest\": f.md5_digest,\n \"digests\": {\"md5\": f.md5_digest, \"sha256\": f.sha256_digest},\n \"size\": f.size,\n # TODO: Remove this once we've had a long enough time with it\n # here to consider it no longer in use.\n \"downloads\": -1,\n \"upload_time\": f.upload_time.strftime(\"%Y-%m-%dT%H:%M:%S\"),\n \"upload_time_iso_8601\": f.upload_time.isoformat() + \"Z\",\n \"url\": request.route_url(\"packaging.file\", path=f.path),\n \"requires_python\": r.requires_python if r.requires_python else None,\n \"yanked\": r.yanked,\n \"yanked_reason\": r.yanked_reason or None,\n }\n for f in fs\n ]\n for r, fs in releases.items()\n }\n\n return {\n \"info\": {\n \"name\": project.name,\n \"version\": release.version,\n \"summary\": release.summary,\n \"description_content_type\": release.description.content_type,\n \"description\": release.description.raw,\n \"keywords\": release.keywords,\n \"license\": release.license,\n \"classifiers\": list(release.classifiers),\n \"author\": release.author,\n \"author_email\": release.author_email,\n \"maintainer\": release.maintainer,\n \"maintainer_email\": release.maintainer_email,\n \"requires_python\": release.requires_python,\n \"platform\": release.platform,\n \"downloads\": {\"last_day\": -1, \"last_week\": -1, \"last_month\": -1},\n \"package_url\": request.route_url(\"packaging.project\", name=project.name),\n \"project_url\": request.route_url(\"packaging.project\", name=project.name),\n \"project_urls\": OrderedDict(release.urls) if release.urls else None,\n \"release_url\": request.route_url(\n \"packaging.release\", name=project.name, version=release.version\n ),\n \"requires_dist\": (\n list(release.requires_dist) if release.requires_dist else None\n ),\n \"docs_url\": project.documentation_url,\n \"bugtrack_url\": None,\n \"home_page\": release.home_page,\n \"download_url\": release.download_url,\n \"yanked\": release.yanked,\n \"yanked_reason\": r.yanked_reason or None,\n },\n \"urls\": releases[release.version],\n \"releases\": releases,\n \"last_serial\": project.last_serial,\n }\n\n\n@view_config(\n route_name=\"legacy.api.json.release_slash\",\n context=Release,\n decorator=_CACHE_DECORATOR,\n)\ndef json_release_slash(release, request):\n return HTTPMovedPermanently(\n # Respond with redirect to url without trailing slash\n request.route_path(\n \"legacy.api.json.release\",\n name=release.project.name,\n version=release.version,\n ),\n headers=_CORS_HEADERS,\n )\n",
"path": "warehouse/legacy/api/json.py"
}
] | [
{
"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import OrderedDict\n\nfrom pyramid.httpexceptions import HTTPMovedPermanently, HTTPNotFound\nfrom pyramid.view import view_config\nfrom sqlalchemy.orm import Load\nfrom sqlalchemy.orm.exc import NoResultFound\n\nfrom warehouse.cache.http import cache_control\nfrom warehouse.cache.origin import origin_cache\nfrom warehouse.packaging.models import File, Project, Release\n\n# Generate appropriate CORS headers for the JSON endpoint.\n# We want to allow Cross-Origin requests here so that users can interact\n# with these endpoints via XHR/Fetch APIs in the browser.\n_CORS_HEADERS = {\n \"Access-Control-Allow-Origin\": \"*\",\n \"Access-Control-Allow-Headers\": \", \".join(\n [\n \"Content-Type\",\n \"If-Match\",\n \"If-Modified-Since\",\n \"If-None-Match\",\n \"If-Unmodified-Since\",\n ]\n ),\n \"Access-Control-Allow-Methods\": \"GET\",\n \"Access-Control-Max-Age\": \"86400\", # 1 day.\n \"Access-Control-Expose-Headers\": \", \".join([\"X-PyPI-Last-Serial\"]),\n}\n\n_CACHE_DECORATOR = [\n cache_control(15 * 60), # 15 minutes\n origin_cache(\n 1 * 24 * 60 * 60, # 1 day\n stale_while_revalidate=5 * 60, # 5 minutes\n stale_if_error=1 * 24 * 60 * 60, # 1 day\n ),\n]\n\n\n@view_config(\n route_name=\"legacy.api.json.project\",\n context=Project,\n renderer=\"json\",\n decorator=_CACHE_DECORATOR,\n)\ndef json_project(project, request):\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name), headers=_CORS_HEADERS\n )\n\n try:\n release = (\n request.db.query(Release)\n .filter(Release.project == project, Release.yanked.is_(False))\n .order_by(Release.is_prerelease.nullslast(), Release._pypi_ordering.desc())\n .limit(1)\n .one()\n )\n except NoResultFound:\n return HTTPNotFound(headers=_CORS_HEADERS)\n\n return json_release(release, request)\n\n\n@view_config(\n route_name=\"legacy.api.json.project_slash\",\n context=Project,\n decorator=_CACHE_DECORATOR,\n)\ndef json_project_slash(project, request):\n return HTTPMovedPermanently(\n # Respond with redirect to url without trailing slash\n request.route_path(\"legacy.api.json.project\", name=project.name),\n headers=_CORS_HEADERS,\n )\n\n\n@view_config(\n route_name=\"legacy.api.json.release\",\n context=Release,\n renderer=\"json\",\n decorator=_CACHE_DECORATOR,\n)\ndef json_release(release, request):\n project = release.project\n\n if project.name != request.matchdict.get(\"name\", project.name):\n return HTTPMovedPermanently(\n request.current_route_path(name=project.name), headers=_CORS_HEADERS\n )\n\n # Apply CORS headers.\n request.response.headers.update(_CORS_HEADERS)\n\n # Get the latest serial number for this project.\n request.response.headers[\"X-PyPI-Last-Serial\"] = str(project.last_serial)\n\n # Get all of the releases and files for this project.\n release_files = (\n request.db.query(Release, File)\n .options(\n Load(Release).load_only(\n \"version\", \"requires_python\", 
\"yanked\", \"yanked_reason\"\n )\n )\n .outerjoin(File)\n .filter(Release.project == project)\n .order_by(Release._pypi_ordering.desc(), File.filename)\n .all()\n )\n\n # Map our releases + files into a dictionary that maps each release to a\n # list of all its files.\n releases = {}\n for r, file_ in release_files:\n files = releases.setdefault(r, [])\n if file_ is not None:\n files.append(file_)\n\n # Serialize our database objects to match the way that PyPI legacy\n # presented this data.\n releases = {\n r.version: [\n {\n \"filename\": f.filename,\n \"packagetype\": f.packagetype,\n \"python_version\": f.python_version,\n \"has_sig\": f.has_signature,\n \"comment_text\": f.comment_text,\n \"md5_digest\": f.md5_digest,\n \"digests\": {\"md5\": f.md5_digest, \"sha256\": f.sha256_digest},\n \"size\": f.size,\n # TODO: Remove this once we've had a long enough time with it\n # here to consider it no longer in use.\n \"downloads\": -1,\n \"upload_time\": f.upload_time.strftime(\"%Y-%m-%dT%H:%M:%S\"),\n \"upload_time_iso_8601\": f.upload_time.isoformat() + \"Z\",\n \"url\": request.route_url(\"packaging.file\", path=f.path),\n \"requires_python\": r.requires_python if r.requires_python else None,\n \"yanked\": r.yanked,\n \"yanked_reason\": r.yanked_reason or None,\n }\n for f in fs\n ]\n for r, fs in releases.items()\n }\n\n return {\n \"info\": {\n \"name\": project.name,\n \"version\": release.version,\n \"summary\": release.summary,\n \"description_content_type\": release.description.content_type,\n \"description\": release.description.raw,\n \"keywords\": release.keywords,\n \"license\": release.license,\n \"classifiers\": list(release.classifiers),\n \"author\": release.author,\n \"author_email\": release.author_email,\n \"maintainer\": release.maintainer,\n \"maintainer_email\": release.maintainer_email,\n \"requires_python\": release.requires_python,\n \"platform\": release.platform,\n \"downloads\": {\"last_day\": -1, \"last_week\": -1, \"last_month\": -1},\n \"package_url\": request.route_url(\"packaging.project\", name=project.name),\n \"project_url\": request.route_url(\"packaging.project\", name=project.name),\n \"project_urls\": OrderedDict(release.urls) if release.urls else None,\n \"release_url\": request.route_url(\n \"packaging.release\", name=project.name, version=release.version\n ),\n \"requires_dist\": (\n list(release.requires_dist) if release.requires_dist else None\n ),\n \"docs_url\": project.documentation_url,\n \"bugtrack_url\": None,\n \"home_page\": release.home_page,\n \"download_url\": release.download_url,\n \"yanked\": release.yanked,\n \"yanked_reason\": release.yanked_reason or None,\n },\n \"urls\": releases[release.version],\n \"releases\": releases,\n \"last_serial\": project.last_serial,\n }\n\n\n@view_config(\n route_name=\"legacy.api.json.release_slash\",\n context=Release,\n decorator=_CACHE_DECORATOR,\n)\ndef json_release_slash(release, request):\n return HTTPMovedPermanently(\n # Respond with redirect to url without trailing slash\n request.route_path(\n \"legacy.api.json.release\",\n name=release.project.name,\n version=release.version,\n ),\n headers=_CORS_HEADERS,\n )\n",
"path": "warehouse/legacy/api/json.py"
}
] | diff --git a/warehouse/legacy/api/json.py b/warehouse/legacy/api/json.py
index 234ce5d867ea..8ce44a1c30e5 100644
--- a/warehouse/legacy/api/json.py
+++ b/warehouse/legacy/api/json.py
@@ -190,7 +190,7 @@ def json_release(release, request):
"home_page": release.home_page,
"download_url": release.download_url,
"yanked": release.yanked,
- "yanked_reason": r.yanked_reason or None,
+ "yanked_reason": release.yanked_reason or None,
},
"urls": releases[release.version],
"releases": releases,
| /pypi/{package}/{version}/json: yanking an older release updates latest release's yanked_reason field
**Describe the bug**
Yanking an older version of a package leads to an unexpected side effect in the latest version's package info provided via the JSON endpoint. In particular, the `yanked_reason` field gets updated.
**Expected behavior**
When yanking a version of a package, no other version's `yanked_reason` field should be updated.
**To Reproduce**
1. Create new package on test.pypi.org
2. Release version `0.2.0`.
3. Release version `0.3.0`.
4. Yank version `0.2.0`.
5. Check json endpoint of package version `0.3.0`.
```console
$ curl -sL https://test.pypi.org/pypi/abn-test-rss-yank/0.3.0/json | jq '.info.yanked'
false
$ curl -sL https://test.pypi.org/pypi/abn-test-rss-yank/0.3.0/json | jq '.info.yanked_reason'
"Testing Yank"
```
**My Platform**
N/A
**Additional context**
* Test package: https://test.pypi.org/project/abn-test-rss-yank/
|
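The one-character diff in this record fixes a stale-loop-variable bug: `r` is left over from the earlier `for r, file_ in release_files:` loop (ordered newest-to-oldest), so after that loop it holds an older release, and that release's `yanked_reason` leaks into the `info` block of whichever release is actually being serialized. A reduced, self-contained illustration of the failure mode (names and data simplified, not warehouse code):

```python
# Reduced illustration of the bug class fixed above.
releases = [
    {"version": "0.3.0", "yanked": False, "yanked_reason": None},
    {"version": "0.2.0", "yanked": True, "yanked_reason": "Testing Yank"},
]

# An earlier processing loop; after it finishes, `r` stays bound to the
# *last* item iterated -- here the yanked 0.2.0 release.
for r in releases:
    pass

latest = releases[0]  # the release the endpoint is actually serializing

buggy = {"yanked": latest["yanked"], "yanked_reason": r["yanked_reason"]}
fixed = {"yanked": latest["yanked"], "yanked_reason": latest["yanked_reason"]}

print(buggy)  # {'yanked': False, 'yanked_reason': 'Testing Yank'} -- mismatch
print(fixed)  # {'yanked': False, 'yanked_reason': None}
```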
translate__pootle-5160 | [
{
"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport os\nimport shutil\nfrom pkgutil import iter_modules\n\nimport pytest\n\nfrom . import fixtures\nfrom .env import PootleTestEnv\nfrom .fixtures import models as fixtures_models\nfrom .fixtures.core import management as fixtures_core_management\nfrom .fixtures.core import utils as fixtures_core_utils\nfrom .fixtures import formats as fixtures_formats\nfrom .fixtures import pootle_fs as fixtures_fs\n\n\ndef _load_fixtures(*modules):\n for mod in modules:\n path = mod.__path__\n prefix = '%s.' % mod.__name__\n\n for loader_, name, is_pkg in iter_modules(path, prefix):\n if not is_pkg:\n yield name\n\n\[email protected]\ndef po_test_dir(request, tmpdir):\n po_dir = str(tmpdir.mkdir(\"po\"))\n\n def rm_po_dir():\n if os.path.exists(po_dir):\n shutil.rmtree(po_dir)\n\n request.addfinalizer(rm_po_dir)\n return po_dir\n\n\[email protected]\ndef po_directory(request, po_test_dir, settings):\n \"\"\"Sets up a tmp directory for PO files.\"\"\"\n from pootle_store.models import fs\n\n translation_directory = settings.POOTLE_TRANSLATION_DIRECTORY\n\n # Adjust locations\n settings.POOTLE_TRANSLATION_DIRECTORY = po_test_dir\n fs.location = po_test_dir\n\n def _cleanup():\n settings.POOTLE_TRANSLATION_DIRECTORY = translation_directory\n\n request.addfinalizer(_cleanup)\n\n\[email protected](scope='session')\ndef tests_use_db(request):\n return bool(\n [item for item in request.node.items\n if item.get_marker('django_db')])\n\n\[email protected](scope='session')\ndef tests_use_vfolders(request):\n return bool(\n [item for item in request.node.items\n if item.get_marker('pootle_vfolders')])\n\n\[email protected](scope='session')\ndef tests_use_migration(request, tests_use_db):\n return bool(\n tests_use_db\n and [item for item in request.node.items\n if item.get_marker('django_migration')])\n\n\[email protected](autouse=True, scope='session')\ndef setup_db_if_needed(request, tests_use_db):\n \"\"\"Sets up the site DB only if tests requested to use the DB (autouse).\"\"\"\n if tests_use_db:\n return request.getfuncargvalue('post_db_setup')\n\n\[email protected](scope='session')\ndef post_db_setup(translations_directory, django_db_setup, django_db_blocker,\n tests_use_db, tests_use_vfolders, request):\n \"\"\"Sets up the site DB for the test session.\"\"\"\n if tests_use_db:\n with django_db_blocker.unblock():\n PootleTestEnv().setup(\n vfolders=tests_use_vfolders)\n\n\[email protected](scope='session')\ndef django_db_use_migrations(tests_use_migration):\n return tests_use_migration\n\n\npytest_plugins = tuple(\n _load_fixtures(\n fixtures,\n fixtures_core_management,\n fixtures_core_utils,\n fixtures_formats,\n fixtures_models,\n fixtures_fs))\n",
"path": "pytest_pootle/plugin.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport os\nimport shutil\nfrom pkgutil import iter_modules\n\nimport pytest\n\nfrom . import fixtures\nfrom .env import PootleTestEnv\nfrom .fixtures import models as fixtures_models\nfrom .fixtures.core import management as fixtures_core_management\nfrom .fixtures.core import utils as fixtures_core_utils\nfrom .fixtures import formats as fixtures_formats\nfrom .fixtures import pootle_fs as fixtures_fs\n\n\ndef _load_fixtures(*modules):\n for mod in modules:\n path = mod.__path__\n prefix = '%s.' % mod.__name__\n\n for loader_, name, is_pkg in iter_modules(path, prefix):\n if not is_pkg:\n yield name\n\n\[email protected]\ndef po_test_dir(request, tmpdir):\n po_dir = str(tmpdir.mkdir(\"po\"))\n\n def rm_po_dir():\n if os.path.exists(po_dir):\n shutil.rmtree(po_dir)\n\n request.addfinalizer(rm_po_dir)\n return po_dir\n\n\[email protected]\ndef po_directory(request, po_test_dir, settings):\n \"\"\"Sets up a tmp directory for PO files.\"\"\"\n from pootle_store.models import fs\n\n translation_directory = settings.POOTLE_TRANSLATION_DIRECTORY\n\n # Adjust locations\n settings.POOTLE_TRANSLATION_DIRECTORY = po_test_dir\n fs.location = po_test_dir\n\n def _cleanup():\n settings.POOTLE_TRANSLATION_DIRECTORY = translation_directory\n\n request.addfinalizer(_cleanup)\n\n\[email protected](scope='session')\ndef tests_use_db(request):\n return bool(\n [item for item in request.node.items\n if item.get_marker('django_db')])\n\n\[email protected](scope='session')\ndef tests_use_vfolders(request):\n return bool(\n [item for item in request.node.items\n if item.get_marker('pootle_vfolders')])\n\n\[email protected](scope='session')\ndef tests_use_migration(request, tests_use_db):\n return bool(\n tests_use_db\n and [item for item in request.node.items\n if item.get_marker('django_migration')])\n\n\[email protected](autouse=True, scope='session')\ndef setup_db_if_needed(request, tests_use_db):\n \"\"\"Sets up the site DB only if tests requested to use the DB (autouse).\"\"\"\n if tests_use_db and not request.config.getvalue('reuse_db'):\n return request.getfuncargvalue('post_db_setup')\n\n\[email protected](scope='session')\ndef post_db_setup(translations_directory, django_db_setup, django_db_blocker,\n tests_use_db, tests_use_vfolders, request):\n \"\"\"Sets up the site DB for the test session.\"\"\"\n if tests_use_db:\n with django_db_blocker.unblock():\n PootleTestEnv().setup(\n vfolders=tests_use_vfolders)\n\n\[email protected](scope='session')\ndef django_db_use_migrations(tests_use_migration):\n return tests_use_migration\n\n\npytest_plugins = tuple(\n _load_fixtures(\n fixtures,\n fixtures_core_management,\n fixtures_core_utils,\n fixtures_formats,\n fixtures_models,\n fixtures_fs))\n",
"path": "pytest_pootle/plugin.py"
}
] | diff --git a/pytest_pootle/plugin.py b/pytest_pootle/plugin.py
index 504bf1a2293..57bdfb8f4fa 100644
--- a/pytest_pootle/plugin.py
+++ b/pytest_pootle/plugin.py
@@ -85,7 +85,7 @@ def tests_use_migration(request, tests_use_db):
@pytest.fixture(autouse=True, scope='session')
def setup_db_if_needed(request, tests_use_db):
"""Sets up the site DB only if tests requested to use the DB (autouse)."""
- if tests_use_db:
+ if tests_use_db and not request.config.getvalue('reuse_db'):
return request.getfuncargvalue('post_db_setup')
| Ensure tests can be run with `--reuse-db`
When iterating on a test that requires DB access (or a few of them), a site-wide setup is currently performed, which in such a scenario ends up being relatively time-consuming and tedious.
Ideally one could use [pytest-django's `--reuse-db` flag](http://pytest-django.readthedocs.org/en/latest/database.html#reuse-db-reuse-the-testing-database-between-test-runs) to considerably reduce setup time on test iterations; however, in the current state of things this feature cannot be used due to the way the Pootle test DB environment is set up.
Let's try to fix that so we can benefit from `--reuse-db`.
|
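For reference, the patched fixture from this record's diff looks as follows when shown in full. The sketch assumes the surrounding `tests_use_db` and `post_db_setup` session fixtures from `pytest_pootle/plugin.py`; `reuse_db` is the option name pytest-django registers for its `--reuse-db` flag, and `request.config.getvalue()` is the older pytest spelling of `request.config.getoption()`.

```python
import pytest


@pytest.fixture(autouse=True, scope="session")
def setup_db_if_needed(request, tests_use_db):
    """Set up the site DB only when tests need it and --reuse-db is off."""
    if tests_use_db and not request.config.getvalue("reuse_db"):
        # getfuncargvalue is the legacy API used throughout this file;
        # newer pytest spells it request.getfixturevalue().
        return request.getfuncargvalue("post_db_setup")
```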
pypi__warehouse-6294 | [
{
"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport email\nimport hashlib\nimport hmac\nimport os.path\nimport re\nimport tarfile\nimport tempfile\nimport zipfile\n\nfrom cgi import FieldStorage, parse_header\nfrom itertools import chain\n\nimport packaging.requirements\nimport packaging.specifiers\nimport packaging.utils\nimport packaging.version\nimport pkg_resources\nimport requests\nimport stdlib_list\nimport wtforms\nimport wtforms.validators\n\nfrom pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPGone\nfrom pyramid.response import Response\nfrom pyramid.view import view_config\nfrom sqlalchemy import exists, func, orm\nfrom sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound\n\nfrom warehouse import forms\nfrom warehouse.admin.squats import Squat\nfrom warehouse.classifiers.models import Classifier\nfrom warehouse.metrics import IMetricsService\nfrom warehouse.packaging.interfaces import IFileStorage\nfrom warehouse.packaging.models import (\n BlacklistedProject,\n Dependency,\n DependencyKind,\n Description,\n File,\n Filename,\n JournalEntry,\n Project,\n Release,\n Role,\n)\nfrom warehouse.utils import http, readme\n\nMAX_FILESIZE = 60 * 1024 * 1024 # 60M\nMAX_SIGSIZE = 8 * 1024 # 8K\n\nPATH_HASHER = \"blake2_256\"\n\n\ndef namespace_stdlib_list(module_list):\n for module_name in module_list:\n parts = module_name.split(\".\")\n for i, part in enumerate(parts):\n yield \".\".join(parts[: i + 1])\n\n\nSTDLIB_PROHIBITTED = {\n packaging.utils.canonicalize_name(s.rstrip(\"-_.\").lstrip(\"-_.\"))\n for s in chain.from_iterable(\n namespace_stdlib_list(stdlib_list.stdlib_list(version))\n for version in stdlib_list.short_versions\n )\n}\n\n# Wheel platform checking\n\n# Note: defining new platform ABI compatibility tags that don't\n# have a python.org binary release to anchor them is a\n# complex task that needs more than just OS+architecture info.\n# For Linux specifically, the platform ABI is defined by each\n# individual distro version, so wheels built on one version may\n# not even work on older versions of the same distro, let alone\n# a completely different distro.\n#\n# That means new entries should only be added given an\n# accompanying ABI spec that explains how to build a\n# compatible binary (see the manylinux specs as examples).\n\n# These platforms can be handled by a simple static list:\n_allowed_platforms = {\n \"any\",\n \"win32\",\n \"win_amd64\",\n \"win_ia64\",\n \"manylinux1_x86_64\",\n \"manylinux1_i686\",\n \"manylinux2010_x86_64\",\n \"manylinux2010_i686\",\n \"linux_armv6l\",\n \"linux_armv7l\",\n}\n# macosx is a little more complicated:\n_macosx_platform_re = re.compile(r\"macosx_10_(\\d+)+_(?P<arch>.*)\")\n_macosx_arches = {\n \"ppc\",\n \"ppc64\",\n \"i386\",\n \"x86_64\",\n \"intel\",\n \"fat\",\n \"fat32\",\n \"fat64\",\n \"universal\",\n}\n\n\n# Actual checking code;\ndef _valid_platform_tag(platform_tag):\n if platform_tag in _allowed_platforms:\n return True\n m = _macosx_platform_re.match(platform_tag)\n if m and 
m.group(\"arch\") in _macosx_arches:\n return True\n return False\n\n\n_error_message_order = [\"metadata_version\", \"name\", \"version\"]\n\n\n_dist_file_regexes = {\n # True/False is for legacy or not.\n True: re.compile(r\".+?\\.(exe|tar\\.gz|bz2|rpm|deb|zip|tgz|egg|dmg|msi|whl)$\", re.I),\n False: re.compile(r\".+?\\.(tar\\.gz|zip|whl|egg)$\", re.I),\n}\n\n\n_wheel_file_re = re.compile(\n r\"\"\"\n ^\n (?P<namever>(?P<name>.+?)(-(?P<ver>\\d.+?))?)\n (\n (-(?P<build>\\d.*?))?\n -(?P<pyver>.+?)\n -(?P<abi>.+?)\n -(?P<plat>.+?)\n (?:\\.whl|\\.dist-info)\n )\n $\n \"\"\",\n re.VERBOSE,\n)\n\n\n_project_name_re = re.compile(\n r\"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$\", re.IGNORECASE\n)\n\n\n_legacy_specifier_re = re.compile(r\"^(?P<name>\\S+)(?: \\((?P<specifier>\\S+)\\))?$\")\n\n\n_valid_description_content_types = {\"text/plain\", \"text/x-rst\", \"text/markdown\"}\n\n_valid_markdown_variants = {\"CommonMark\", \"GFM\"}\n\n\ndef _exc_with_message(exc, message):\n # The crappy old API that PyPI offered uses the status to pass down\n # messages to the client. So this function will make that easier to do.\n resp = exc(message)\n resp.status = \"{} {}\".format(resp.status_code, message)\n return resp\n\n\ndef _validate_pep440_version(form, field):\n parsed = packaging.version.parse(field.data)\n\n # Check that this version is a valid PEP 440 version at all.\n if not isinstance(parsed, packaging.version.Version):\n raise wtforms.validators.ValidationError(\n \"Start and end with a letter or numeral containing only \"\n \"ASCII numeric and '.', '_' and '-'.\"\n )\n\n # Check that this version does not have a PEP 440 local segment attached\n # to it.\n if parsed.local is not None:\n raise wtforms.validators.ValidationError(\"Can't use PEP 440 local versions.\")\n\n\ndef _parse_legacy_requirement(requirement):\n parsed = _legacy_specifier_re.search(requirement)\n if parsed is None:\n raise ValueError(\"Invalid requirement.\")\n return parsed.groupdict()[\"name\"], parsed.groupdict()[\"specifier\"]\n\n\ndef _validate_pep440_specifier(specifier):\n try:\n packaging.specifiers.SpecifierSet(specifier)\n except packaging.specifiers.InvalidSpecifier:\n raise wtforms.validators.ValidationError(\n \"Invalid specifier in requirement.\"\n ) from None\n\n\ndef _validate_pep440_specifier_field(form, field):\n return _validate_pep440_specifier(field.data)\n\n\ndef _validate_legacy_non_dist_req(requirement):\n try:\n req = packaging.requirements.Requirement(requirement.replace(\"_\", \"\"))\n except packaging.requirements.InvalidRequirement:\n raise wtforms.validators.ValidationError(\n \"Invalid requirement: {!r}\".format(requirement)\n ) from None\n\n if req.url is not None:\n raise wtforms.validators.ValidationError(\n \"Can't direct dependency: {!r}\".format(requirement)\n )\n\n if any(\n not identifier.isalnum() or identifier[0].isdigit()\n for identifier in req.name.split(\".\")\n ):\n raise wtforms.validators.ValidationError(\"Use a valid Python identifier.\")\n\n\ndef _validate_legacy_non_dist_req_list(form, field):\n for datum in field.data:\n _validate_legacy_non_dist_req(datum)\n\n\ndef _validate_legacy_dist_req(requirement):\n try:\n req = packaging.requirements.Requirement(requirement)\n except packaging.requirements.InvalidRequirement:\n raise wtforms.validators.ValidationError(\n \"Invalid requirement: {!r}.\".format(requirement)\n ) from None\n\n if req.url is not None:\n raise wtforms.validators.ValidationError(\n \"Can't have direct dependency: {!r}\".format(requirement)\n )\n\n\ndef 
_validate_legacy_dist_req_list(form, field):\n for datum in field.data:\n _validate_legacy_dist_req(datum)\n\n\ndef _validate_requires_external(requirement):\n name, specifier = _parse_legacy_requirement(requirement)\n\n # TODO: Is it really reasonable to parse the specifier using PEP 440?\n if specifier is not None:\n _validate_pep440_specifier(specifier)\n\n\ndef _validate_requires_external_list(form, field):\n for datum in field.data:\n _validate_requires_external(datum)\n\n\ndef _validate_project_url(value):\n try:\n label, url = value.split(\", \", 1)\n except ValueError:\n raise wtforms.validators.ValidationError(\n \"Use both a label and an URL.\"\n ) from None\n\n if not label:\n raise wtforms.validators.ValidationError(\"Use a label.\")\n\n if len(label) > 32:\n raise wtforms.validators.ValidationError(\"Use 32 characters or less.\")\n\n if not url:\n raise wtforms.validators.ValidationError(\"Use an URL.\")\n\n if not http.is_valid_uri(url, require_authority=False):\n raise wtforms.validators.ValidationError(\"Use valid URL.\")\n\n\ndef _validate_project_url_list(form, field):\n for datum in field.data:\n _validate_project_url(datum)\n\n\ndef _validate_rfc822_email_field(form, field):\n email_validator = wtforms.validators.Email(message=\"Use a valid email address\")\n addresses = email.utils.getaddresses([field.data])\n\n for real_name, address in addresses:\n email_validator(form, type(\"field\", (), {\"data\": address}))\n\n\ndef _validate_description_content_type(form, field):\n def _raise(message):\n raise wtforms.validators.ValidationError(\n f\"Invalid description content type: {message}\"\n )\n\n content_type, parameters = parse_header(field.data)\n if content_type not in _valid_description_content_types:\n _raise(\"type/subtype is not valid\")\n\n charset = parameters.get(\"charset\")\n if charset and charset != \"UTF-8\":\n _raise(\"Use a valid charset\")\n\n variant = parameters.get(\"variant\")\n if (\n content_type == \"text/markdown\"\n and variant\n and variant not in _valid_markdown_variants\n ):\n _raise(\n \"Use a valid variant, expected one of {}\".format(\n \", \".join(_valid_markdown_variants)\n )\n )\n\n\ndef _construct_dependencies(form, types):\n for name, kind in types.items():\n for item in getattr(form, name).data:\n yield Dependency(kind=kind.value, specifier=item)\n\n\nclass ListField(wtforms.Field):\n def process_formdata(self, valuelist):\n self.data = [v.strip() for v in valuelist if v.strip()]\n\n\n# TODO: Eventually this whole validation thing should move to the packaging\n# library and we should just call that. 
However until PEP 426 is done\n# that library won't have an API for this.\nclass MetadataForm(forms.Form):\n\n # Metadata version\n metadata_version = wtforms.StringField(\n description=\"Metadata-Version\",\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.AnyOf(\n # Note: This isn't really Metadata 2.0, however bdist_wheel\n # claims it is producing a Metadata 2.0 metadata when in\n # reality it's more like 1.2 with some extensions.\n [\"1.0\", \"1.1\", \"1.2\", \"2.0\", \"2.1\"],\n message=\"Use a known metadata version.\",\n ),\n ],\n )\n\n # Identity Project and Release\n name = wtforms.StringField(\n description=\"Name\",\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.Regexp(\n _project_name_re,\n re.IGNORECASE,\n message=(\n \"Start and end with a letter or numeral containing \"\n \"only ASCII numeric and '.', '_' and '-'.\"\n ),\n ),\n ],\n )\n version = wtforms.StringField(\n description=\"Version\",\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.Regexp(\n r\"^(?!\\s).*(?<!\\s)$\",\n message=\"Can't have leading or trailing whitespace.\",\n ),\n _validate_pep440_version,\n ],\n )\n\n # Additional Release metadata\n summary = wtforms.StringField(\n description=\"Summary\",\n validators=[\n wtforms.validators.Optional(),\n wtforms.validators.Length(max=512),\n wtforms.validators.Regexp(\n r\"^.+$\", # Rely on the fact that . doesn't match a newline.\n message=\"Use a single line only.\",\n ),\n ],\n )\n description = wtforms.StringField(\n description=\"Description\", validators=[wtforms.validators.Optional()]\n )\n author = wtforms.StringField(\n description=\"Author\", validators=[wtforms.validators.Optional()]\n )\n description_content_type = wtforms.StringField(\n description=\"Description-Content-Type\",\n validators=[wtforms.validators.Optional(), _validate_description_content_type],\n )\n author_email = wtforms.StringField(\n description=\"Author-email\",\n validators=[wtforms.validators.Optional(), _validate_rfc822_email_field],\n )\n maintainer = wtforms.StringField(\n description=\"Maintainer\", validators=[wtforms.validators.Optional()]\n )\n maintainer_email = wtforms.StringField(\n description=\"Maintainer-email\",\n validators=[wtforms.validators.Optional(), _validate_rfc822_email_field],\n )\n license = wtforms.StringField(\n description=\"License\", validators=[wtforms.validators.Optional()]\n )\n keywords = wtforms.StringField(\n description=\"Keywords\", validators=[wtforms.validators.Optional()]\n )\n classifiers = wtforms.fields.SelectMultipleField(description=\"Classifier\")\n platform = wtforms.StringField(\n description=\"Platform\", validators=[wtforms.validators.Optional()]\n )\n\n # URLs\n home_page = wtforms.StringField(\n description=\"Home-Page\",\n validators=[wtforms.validators.Optional(), forms.URIValidator()],\n )\n download_url = wtforms.StringField(\n description=\"Download-URL\",\n validators=[wtforms.validators.Optional(), forms.URIValidator()],\n )\n\n # Dependency Information\n requires_python = wtforms.StringField(\n description=\"Requires-Python\",\n validators=[wtforms.validators.Optional(), _validate_pep440_specifier_field],\n )\n\n # File information\n pyversion = wtforms.StringField(validators=[wtforms.validators.Optional()])\n filetype = wtforms.StringField(\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.AnyOf(\n [\n \"bdist_dmg\",\n \"bdist_dumb\",\n \"bdist_egg\",\n \"bdist_msi\",\n \"bdist_rpm\",\n \"bdist_wheel\",\n 
\"bdist_wininst\",\n \"sdist\",\n ],\n message=\"Use a known file type.\",\n ),\n ]\n )\n comment = wtforms.StringField(validators=[wtforms.validators.Optional()])\n md5_digest = wtforms.StringField(validators=[wtforms.validators.Optional()])\n sha256_digest = wtforms.StringField(\n validators=[\n wtforms.validators.Optional(),\n wtforms.validators.Regexp(\n r\"^[A-F0-9]{64}$\",\n re.IGNORECASE,\n message=\"Use a valid, hex-encoded, SHA256 message digest.\",\n ),\n ]\n )\n blake2_256_digest = wtforms.StringField(\n validators=[\n wtforms.validators.Optional(),\n wtforms.validators.Regexp(\n r\"^[A-F0-9]{64}$\",\n re.IGNORECASE,\n message=\"Use a valid, hex-encoded, BLAKE2 message digest.\",\n ),\n ]\n )\n\n # Legacy dependency information\n requires = ListField(\n validators=[wtforms.validators.Optional(), _validate_legacy_non_dist_req_list]\n )\n provides = ListField(\n validators=[wtforms.validators.Optional(), _validate_legacy_non_dist_req_list]\n )\n obsoletes = ListField(\n validators=[wtforms.validators.Optional(), _validate_legacy_non_dist_req_list]\n )\n\n # Newer dependency information\n requires_dist = ListField(\n description=\"Requires-Dist\",\n validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list],\n )\n provides_dist = ListField(\n description=\"Provides-Dist\",\n validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list],\n )\n obsoletes_dist = ListField(\n description=\"Obsoletes-Dist\",\n validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list],\n )\n requires_external = ListField(\n description=\"Requires-External\",\n validators=[wtforms.validators.Optional(), _validate_requires_external_list],\n )\n\n # Newer metadata information\n project_urls = ListField(\n description=\"Project-URL\",\n validators=[wtforms.validators.Optional(), _validate_project_url_list],\n )\n\n def full_validate(self):\n # All non source releases *must* have a pyversion\n if (\n self.filetype.data\n and self.filetype.data != \"sdist\"\n and not self.pyversion.data\n ):\n raise wtforms.validators.ValidationError(\n \"Python version is required for binary distribution uploads.\"\n )\n\n # All source releases *must* have a pyversion of \"source\"\n if self.filetype.data == \"sdist\":\n if not self.pyversion.data:\n self.pyversion.data = \"source\"\n elif self.pyversion.data != \"source\":\n raise wtforms.validators.ValidationError(\n \"Use 'source' as Python version for an sdist.\"\n )\n\n # We *must* have at least one digest to verify against.\n if not self.md5_digest.data and not self.sha256_digest.data:\n raise wtforms.validators.ValidationError(\n \"Include at least one message digest.\"\n )\n\n\n_safe_zipnames = re.compile(r\"(purelib|platlib|headers|scripts|data).+\", re.I)\n# .tar uncompressed, .tar.gz .tgz, .tar.bz2 .tbz2\n_tar_filenames_re = re.compile(r\"\\.(?:tar$|t(?:ar\\.)?(?P<z_type>gz|bz2)$)\")\n\n\ndef _is_valid_dist_file(filename, filetype):\n \"\"\"\n Perform some basic checks to see whether the indicated file could be\n a valid distribution file.\n \"\"\"\n\n # If our file is a zipfile, then ensure that it's members are only\n # compressed with supported compression methods.\n if zipfile.is_zipfile(filename):\n with zipfile.ZipFile(filename) as zfp:\n for zinfo in zfp.infolist():\n if zinfo.compress_type not in {\n zipfile.ZIP_STORED,\n zipfile.ZIP_DEFLATED,\n }:\n return False\n\n tar_fn_match = _tar_filenames_re.search(filename)\n if tar_fn_match:\n # Ensure that this is a valid tar file, and that it contains PKG-INFO.\n 
z_type = tar_fn_match.group(\"z_type\") or \"\"\n try:\n with tarfile.open(filename, f\"r:{z_type}\") as tar:\n # This decompresses the entire stream to validate it and the\n # tar within. Easy CPU DoS attack. :/\n bad_tar = True\n member = tar.next()\n while member:\n parts = os.path.split(member.name)\n if len(parts) == 2 and parts[1] == \"PKG-INFO\":\n bad_tar = False\n member = tar.next()\n if bad_tar:\n return False\n except tarfile.ReadError:\n return False\n elif filename.endswith(\".exe\"):\n # The only valid filetype for a .exe file is \"bdist_wininst\".\n if filetype != \"bdist_wininst\":\n return False\n\n # Ensure that the .exe is a valid zip file, and that all of the files\n # contained within it have safe filenames.\n try:\n with zipfile.ZipFile(filename, \"r\") as zfp:\n # We need the no branch below to work around a bug in\n # coverage.py where it's detecting a missed branch where there\n # isn't one.\n for zipname in zfp.namelist(): # pragma: no branch\n if not _safe_zipnames.match(zipname):\n return False\n except zipfile.BadZipFile:\n return False\n elif filename.endswith(\".msi\"):\n # The only valid filetype for a .msi is \"bdist_msi\"\n if filetype != \"bdist_msi\":\n return False\n\n # Check the first 8 bytes of the MSI file. This was taken from the\n # legacy implementation of PyPI which itself took it from the\n # implementation of `file` I believe.\n with open(filename, \"rb\") as fp:\n if fp.read(8) != b\"\\xD0\\xCF\\x11\\xE0\\xA1\\xB1\\x1A\\xE1\":\n return False\n elif filename.endswith(\".zip\") or filename.endswith(\".egg\"):\n # Ensure that the .zip/.egg is a valid zip file, and that it has a\n # PKG-INFO file.\n try:\n with zipfile.ZipFile(filename, \"r\") as zfp:\n for zipname in zfp.namelist():\n parts = os.path.split(zipname)\n if len(parts) == 2 and parts[1] == \"PKG-INFO\":\n # We need the no branch below to work around a bug in\n # coverage.py where it's detecting a missed branch\n # where there isn't one.\n break # pragma: no branch\n else:\n return False\n except zipfile.BadZipFile:\n return False\n elif filename.endswith(\".whl\"):\n # Ensure that the .whl is a valid zip file, and that it has a WHEEL\n # file.\n try:\n with zipfile.ZipFile(filename, \"r\") as zfp:\n for zipname in zfp.namelist():\n parts = os.path.split(zipname)\n if len(parts) == 2 and parts[1] == \"WHEEL\":\n # We need the no branch below to work around a bug in\n # coverage.py where it's detecting a missed branch\n # where there isn't one.\n break # pragma: no branch\n else:\n return False\n except zipfile.BadZipFile:\n return False\n\n # If we haven't yet decided it's not valid, then we'll assume it is and\n # allow it.\n return True\n\n\ndef _is_duplicate_file(db_session, filename, hashes):\n \"\"\"\n Check to see if file already exists, and if it's content matches.\n A file is considered to exist if its filename *or* blake2 digest are\n present in a file row in the database.\n\n Returns:\n - True: This file is a duplicate and all further processing should halt.\n - False: This file exists, but it is not a duplicate.\n - None: This file does not exist.\n \"\"\"\n\n file_ = (\n db_session.query(File)\n .filter(\n (File.filename == filename)\n | (File.blake2_256_digest == hashes[\"blake2_256\"])\n )\n .first()\n )\n\n if file_ is not None:\n return (\n file_.filename == filename\n and file_.sha256_digest == hashes[\"sha256\"]\n and file_.md5_digest == hashes[\"md5\"]\n and file_.blake2_256_digest == hashes[\"blake2_256\"]\n )\n\n return None\n\n\ndef 
_no_deprecated_classifiers(request):\n deprecated_classifiers = {\n classifier.classifier\n for classifier in (\n request.db.query(Classifier.classifier)\n .filter(Classifier.deprecated.is_(True))\n .all()\n )\n }\n\n def validate_no_deprecated_classifiers(form, field):\n invalid_classifiers = set(field.data or []) & deprecated_classifiers\n if invalid_classifiers:\n first_invalid_classifier = sorted(invalid_classifiers)[0]\n host = request.registry.settings.get(\"warehouse.domain\")\n classifiers_url = request.route_url(\"classifiers\", _host=host)\n\n raise wtforms.validators.ValidationError(\n f\"Classifier {first_invalid_classifier!r} has been \"\n f\"deprecated, see {classifiers_url} for a list of valid \"\n \"classifiers.\"\n )\n\n return validate_no_deprecated_classifiers\n\n\n@view_config(\n route_name=\"forklift.legacy.file_upload\",\n uses_session=True,\n require_csrf=False,\n require_methods=[\"POST\"],\n)\ndef file_upload(request):\n # If we're in read-only mode, let upload clients know\n if request.flags.enabled(\"read-only\"):\n raise _exc_with_message(\n HTTPForbidden, \"Read-only mode: Uploads are temporarily disabled\"\n )\n\n # Log an attempt to upload\n metrics = request.find_service(IMetricsService, context=None)\n metrics.increment(\"warehouse.upload.attempt\")\n\n # Before we do anything, if there isn't an authenticated user with this\n # request, then we'll go ahead and bomb out.\n if request.authenticated_userid is None:\n raise _exc_with_message(\n HTTPForbidden, \"Invalid or non-existent authentication information.\"\n )\n\n # Ensure that user has a verified, primary email address. This should both\n # reduce the ease of spam account creation and activity, as well as act as\n # a forcing function for https://github.com/pypa/warehouse/issues/3632.\n # TODO: Once https://github.com/pypa/warehouse/issues/3632 has been solved,\n # we might consider a different condition, possibly looking at\n # User.is_active instead.\n if not (request.user.primary_email and request.user.primary_email.verified):\n raise _exc_with_message(\n HTTPBadRequest,\n (\n \"User {!r} does not have a verified primary email address. \"\n \"Please add a verified primary email before attempting to \"\n \"upload to PyPI. See {project_help} for more information.\"\n \"for more information.\"\n ).format(\n request.user.username,\n project_help=request.help_url(_anchor=\"verified-email\"),\n ),\n ) from None\n\n # Do some cleanup of the various form fields\n for key in list(request.POST):\n value = request.POST.get(key)\n if isinstance(value, str):\n # distutils \"helpfully\" substitutes unknown, but \"required\" values\n # with the string \"UNKNOWN\". This is basically never what anyone\n # actually wants so we'll just go ahead and delete anything whose\n # value is UNKNOWN.\n if value.strip() == \"UNKNOWN\":\n del request.POST[key]\n\n # Escape NUL characters, which psycopg doesn't like\n if \"\\x00\" in value:\n request.POST[key] = value.replace(\"\\x00\", \"\\\\x00\")\n\n # We require protocol_version 1, it's the only supported version however\n # passing a different version should raise an error.\n if request.POST.get(\"protocol_version\", \"1\") != \"1\":\n raise _exc_with_message(HTTPBadRequest, \"Unknown protocol version.\")\n\n # Check if any fields were supplied as a tuple and have become a\n # FieldStorage. 
The 'content' and 'gpg_signature' fields _should_ be a\n # FieldStorage, however.\n # ref: https://github.com/pypa/warehouse/issues/2185\n # ref: https://github.com/pypa/warehouse/issues/2491\n for field in set(request.POST) - {\"content\", \"gpg_signature\"}:\n values = request.POST.getall(field)\n if any(isinstance(value, FieldStorage) for value in values):\n raise _exc_with_message(HTTPBadRequest, f\"{field}: Should not be a tuple.\")\n\n # Look up all of the valid classifiers\n all_classifiers = request.db.query(Classifier).all()\n\n # Validate and process the incoming metadata.\n form = MetadataForm(request.POST)\n\n # Add a validator for deprecated classifiers\n form.classifiers.validators.append(_no_deprecated_classifiers(request))\n\n form.classifiers.choices = [(c.classifier, c.classifier) for c in all_classifiers]\n if not form.validate():\n for field_name in _error_message_order:\n if field_name in form.errors:\n break\n else:\n field_name = sorted(form.errors.keys())[0]\n\n if field_name in form:\n field = form[field_name]\n if field.description and isinstance(field, wtforms.StringField):\n error_message = (\n \"{value!r} is an invalid value for {field}. \".format(\n value=field.data, field=field.description\n )\n + \"Error: {} \".format(form.errors[field_name][0])\n + \"See \"\n \"https://packaging.python.org/specifications/core-metadata\"\n )\n else:\n error_message = \"Invalid value for {field}. Error: {msgs[0]}\".format(\n field=field_name, msgs=form.errors[field_name]\n )\n else:\n error_message = \"Error: {}\".format(form.errors[field_name][0])\n\n raise _exc_with_message(HTTPBadRequest, error_message)\n\n # Ensure that we have file data in the request.\n if \"content\" not in request.POST:\n raise _exc_with_message(HTTPBadRequest, \"Upload payload does not have a file.\")\n\n # Look up the project first before doing anything else, this is so we can\n # automatically register it if we need to and can check permissions before\n # going any further.\n try:\n project = (\n request.db.query(Project)\n .filter(\n Project.normalized_name == func.normalize_pep426_name(form.name.data)\n )\n .one()\n )\n except NoResultFound:\n # Check for AdminFlag set by a PyPI Administrator disabling new project\n # registration, reasons for this include Spammers, security\n # vulnerabilities, or just wanting to be lazy and not worry ;)\n if request.flags.enabled(\"disallow-new-project-registration\"):\n raise _exc_with_message(\n HTTPForbidden,\n (\n \"New project registration temporarily disabled. \"\n \"See {projecthelp} for details\"\n ).format(projecthelp=request.help_url(_anchor=\"admin-intervention\")),\n ) from None\n\n # Before we create the project, we're going to check our blacklist to\n # see if this project is even allowed to be registered. If it is not,\n # then we're going to deny the request to create this project.\n if request.db.query(\n exists().where(\n BlacklistedProject.name == func.normalize_pep426_name(form.name.data)\n )\n ).scalar():\n raise _exc_with_message(\n HTTPBadRequest,\n (\n \"The name {name!r} isn't allowed. 
\"\n \"See {projecthelp} \"\n \"for more information.\"\n ).format(\n name=form.name.data,\n projecthelp=request.help_url(_anchor=\"project-name\"),\n ),\n ) from None\n\n # Also check for collisions with Python Standard Library modules.\n if packaging.utils.canonicalize_name(form.name.data) in STDLIB_PROHIBITTED:\n raise _exc_with_message(\n HTTPBadRequest,\n (\n \"The name {name!r} isn't allowed (conflict with Python \"\n \"Standard Library module name). See \"\n \"{projecthelp} for more information.\"\n ).format(\n name=form.name.data,\n projecthelp=request.help_url(_anchor=\"project-name\"),\n ),\n ) from None\n\n # The project doesn't exist in our database, so first we'll check for\n # projects with a similar name\n squattees = (\n request.db.query(Project)\n .filter(\n func.levenshtein(\n Project.normalized_name, func.normalize_pep426_name(form.name.data)\n )\n <= 2\n )\n .all()\n )\n\n # Next we'll create the project\n project = Project(name=form.name.data)\n request.db.add(project)\n\n # Now that the project exists, add any squats which it is the squatter for\n for squattee in squattees:\n request.db.add(Squat(squatter=project, squattee=squattee))\n\n # Then we'll add a role setting the current user as the \"Owner\" of the\n # project.\n request.db.add(Role(user=request.user, project=project, role_name=\"Owner\"))\n # TODO: This should be handled by some sort of database trigger or a\n # SQLAlchemy hook or the like instead of doing it inline in this\n # view.\n request.db.add(\n JournalEntry(\n name=project.name,\n action=\"create\",\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n request.db.add(\n JournalEntry(\n name=project.name,\n action=\"add Owner {}\".format(request.user.username),\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n\n # Check that the user has permission to do things to this project, if this\n # is a new project this will act as a sanity check for the role we just\n # added above.\n if not request.has_permission(\"upload\", project):\n raise _exc_with_message(\n HTTPForbidden,\n (\n \"The credential associated with user '{0}' \"\n \"isn't allowed to upload to project '{1}'. \"\n \"See {2} for more information.\"\n ).format(\n request.user.username,\n project.name,\n request.help_url(_anchor=\"project-name\"),\n ),\n )\n\n # Update name if it differs but is still equivalent. 
We don't need to check if\n # they are equivalent when normalized because that's already been done when we\n # queried for the project.\n if project.name != form.name.data:\n project.name = form.name.data\n\n # Render our description so we can save from having to render this data every time\n # we load a project description page.\n rendered = None\n if form.description.data:\n description_content_type = form.description_content_type.data\n if not description_content_type:\n description_content_type = \"text/x-rst\"\n\n rendered = readme.render(\n form.description.data, description_content_type, use_fallback=False\n )\n\n # Uploading should prevent broken rendered descriptions.\n if rendered is None:\n if form.description_content_type.data:\n message = (\n \"The description failed to render \"\n \"for '{description_content_type}'.\"\n ).format(description_content_type=description_content_type)\n else:\n message = (\n \"The description failed to render \"\n \"in the default format of reStructuredText.\"\n )\n raise _exc_with_message(\n HTTPBadRequest,\n \"{message} See {projecthelp} for more information.\".format(\n message=message,\n projecthelp=request.help_url(_anchor=\"description-content-type\"),\n ),\n ) from None\n\n try:\n canonical_version = packaging.utils.canonicalize_version(form.version.data)\n release = (\n request.db.query(Release)\n .filter(\n (Release.project == project)\n & (Release.canonical_version == canonical_version)\n )\n .one()\n )\n except MultipleResultsFound:\n # There are multiple releases of this project which have the same\n # canonical version that were uploaded before we checked for\n # canonical version equivalence, so return the exact match instead\n release = (\n request.db.query(Release)\n .filter(\n (Release.project == project) & (Release.version == form.version.data)\n )\n .one()\n )\n except NoResultFound:\n release = Release(\n project=project,\n _classifiers=[\n c for c in all_classifiers if c.classifier in form.classifiers.data\n ],\n dependencies=list(\n _construct_dependencies(\n form,\n {\n \"requires\": DependencyKind.requires,\n \"provides\": DependencyKind.provides,\n \"obsoletes\": DependencyKind.obsoletes,\n \"requires_dist\": DependencyKind.requires_dist,\n \"provides_dist\": DependencyKind.provides_dist,\n \"obsoletes_dist\": DependencyKind.obsoletes_dist,\n \"requires_external\": DependencyKind.requires_external,\n \"project_urls\": DependencyKind.project_url,\n },\n )\n ),\n canonical_version=canonical_version,\n description=Description(\n content_type=form.description_content_type.data,\n raw=form.description.data or \"\",\n html=rendered or \"\",\n rendered_by=readme.renderer_version(),\n ),\n **{\n k: getattr(form, k).data\n for k in {\n # This is a list of all the fields in the form that we\n # should pull off and insert into our new release.\n \"version\",\n \"summary\",\n \"license\",\n \"author\",\n \"author_email\",\n \"maintainer\",\n \"maintainer_email\",\n \"keywords\",\n \"platform\",\n \"home_page\",\n \"download_url\",\n \"requires_python\",\n }\n },\n uploader=request.user,\n uploaded_via=request.user_agent,\n )\n request.db.add(release)\n # TODO: This should be handled by some sort of database trigger or\n # a SQLAlchemy hook or the like instead of doing it inline in\n # this view.\n request.db.add(\n JournalEntry(\n name=release.project.name,\n version=release.version,\n action=\"new release\",\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n\n # TODO: We need a better solution to this than to 
just do it inline inside\n # this method. Ideally the version field would just be sortable, but\n # at least this should be some sort of hook or trigger.\n releases = (\n request.db.query(Release)\n .filter(Release.project == project)\n .options(orm.load_only(Release._pypi_ordering))\n .all()\n )\n for i, r in enumerate(\n sorted(releases, key=lambda x: packaging.version.parse(x.version))\n ):\n r._pypi_ordering = i\n\n # Pull the filename out of our POST data.\n filename = request.POST[\"content\"].filename\n\n # Make sure that the filename does not contain any path separators.\n if \"/\" in filename or \"\\\\\" in filename:\n raise _exc_with_message(\n HTTPBadRequest, \"Cannot upload a file with '/' or '\\\\' in the name.\"\n )\n\n # Make sure the filename ends with an allowed extension.\n if _dist_file_regexes[project.allow_legacy_files].search(filename) is None:\n raise _exc_with_message(\n HTTPBadRequest,\n \"Invalid file extension: Use .egg, .tar.gz, .whl or .zip \"\n \"extension. (https://www.python.org/dev/peps/pep-0527)\",\n )\n\n # Make sure that our filename matches the project that it is being uploaded\n # to.\n prefix = pkg_resources.safe_name(project.name).lower()\n if not pkg_resources.safe_name(filename).lower().startswith(prefix):\n raise _exc_with_message(\n HTTPBadRequest,\n \"Start filename for {!r} with {!r}.\".format(project.name, prefix),\n )\n\n # Check the content type of what is being uploaded\n if not request.POST[\"content\"].type or request.POST[\"content\"].type.startswith(\n \"image/\"\n ):\n raise _exc_with_message(HTTPBadRequest, \"Invalid distribution file.\")\n\n # Ensure that the package filetype is allowed.\n # TODO: Once PEP 527 is completely implemented we should be able to delete\n # this and just move it into the form itself.\n if not project.allow_legacy_files and form.filetype.data not in {\n \"sdist\",\n \"bdist_wheel\",\n \"bdist_egg\",\n }:\n raise _exc_with_message(HTTPBadRequest, \"Unknown type of file.\")\n\n # The project may or may not have a file size specified on the project, if\n # it does then it may or may not be smaller or larger than our global file\n # size limits.\n file_size_limit = max(filter(None, [MAX_FILESIZE, project.upload_limit]))\n\n with tempfile.TemporaryDirectory() as tmpdir:\n temporary_filename = os.path.join(tmpdir, filename)\n\n # Buffer the entire file onto disk, checking the hash of the file as we\n # go along.\n with open(temporary_filename, \"wb\") as fp:\n file_size = 0\n file_hashes = {\n \"md5\": hashlib.md5(),\n \"sha256\": hashlib.sha256(),\n \"blake2_256\": hashlib.blake2b(digest_size=256 // 8),\n }\n for chunk in iter(lambda: request.POST[\"content\"].file.read(8096), b\"\"):\n file_size += len(chunk)\n if file_size > file_size_limit:\n raise _exc_with_message(\n HTTPBadRequest,\n \"File too large. \"\n + \"Limit for project {name!r} is {limit} MB. \".format(\n name=project.name, limit=file_size_limit // (1024 * 1024)\n )\n + \"See \"\n + request.help_url(_anchor=\"file-size-limit\"),\n )\n fp.write(chunk)\n for hasher in file_hashes.values():\n hasher.update(chunk)\n\n # Take our hash functions and compute the final hashes for them now.\n file_hashes = {k: h.hexdigest().lower() for k, h in file_hashes.items()}\n\n # Actually verify the digests that we've gotten. We're going to use\n # hmac.compare_digest even though we probably don't actually need to\n # because it's better safe than sorry. 
In the case of multiple digests\n # we expect them all to be given.\n if not all(\n [\n hmac.compare_digest(\n getattr(form, \"{}_digest\".format(digest_name)).data.lower(),\n digest_value,\n )\n for digest_name, digest_value in file_hashes.items()\n if getattr(form, \"{}_digest\".format(digest_name)).data\n ]\n ):\n raise _exc_with_message(\n HTTPBadRequest,\n \"The digest supplied does not match a digest calculated \"\n \"from the uploaded file.\",\n )\n\n # Check to see if the file that was uploaded exists already or not.\n is_duplicate = _is_duplicate_file(request.db, filename, file_hashes)\n if is_duplicate:\n return Response()\n elif is_duplicate is not None:\n raise _exc_with_message(\n HTTPBadRequest,\n # Note: Changing this error message to something that doesn't\n # start with \"File already exists\" will break the\n # --skip-existing functionality in twine\n # ref: https://github.com/pypa/warehouse/issues/3482\n # ref: https://github.com/pypa/twine/issues/332\n \"File already exists. See \"\n + request.help_url(_anchor=\"file-name-reuse\"),\n )\n\n # Check to see if the file that was uploaded exists in our filename log\n if request.db.query(\n request.db.query(Filename).filter(Filename.filename == filename).exists()\n ).scalar():\n raise _exc_with_message(\n HTTPBadRequest,\n \"This filename has already been used, use a \"\n \"different version. \"\n \"See \" + request.help_url(_anchor=\"file-name-reuse\"),\n )\n\n # Check to see if uploading this file would create a duplicate sdist\n # for the current release.\n if (\n form.filetype.data == \"sdist\"\n and request.db.query(\n request.db.query(File)\n .filter((File.release == release) & (File.packagetype == \"sdist\"))\n .exists()\n ).scalar()\n ):\n raise _exc_with_message(\n HTTPBadRequest, \"Only one sdist may be uploaded per release.\"\n )\n\n # Check the file to make sure it is a valid distribution file.\n if not _is_valid_dist_file(temporary_filename, form.filetype.data):\n raise _exc_with_message(HTTPBadRequest, \"Invalid distribution file.\")\n\n # Check that if it's a binary wheel, it's on a supported platform\n if filename.endswith(\".whl\"):\n wheel_info = _wheel_file_re.match(filename)\n plats = wheel_info.group(\"plat\").split(\".\")\n for plat in plats:\n if not _valid_platform_tag(plat):\n raise _exc_with_message(\n HTTPBadRequest,\n \"Binary wheel '{filename}' has an unsupported \"\n \"platform tag '{plat}'.\".format(filename=filename, plat=plat),\n )\n\n # Also buffer the entire signature file to disk.\n if \"gpg_signature\" in request.POST:\n has_signature = True\n with open(os.path.join(tmpdir, filename + \".asc\"), \"wb\") as fp:\n signature_size = 0\n for chunk in iter(\n lambda: request.POST[\"gpg_signature\"].file.read(8096), b\"\"\n ):\n signature_size += len(chunk)\n if signature_size > MAX_SIGSIZE:\n raise _exc_with_message(HTTPBadRequest, \"Signature too large.\")\n fp.write(chunk)\n\n # Check whether signature is ASCII armored\n with open(os.path.join(tmpdir, filename + \".asc\"), \"rb\") as fp:\n if not fp.read().startswith(b\"-----BEGIN PGP SIGNATURE-----\"):\n raise _exc_with_message(\n HTTPBadRequest, \"PGP signature isn't ASCII armored.\"\n )\n else:\n has_signature = False\n\n # TODO: This should be handled by some sort of database trigger or a\n # SQLAlchemy hook or the like instead of doing it inline in this\n # view.\n request.db.add(Filename(filename=filename))\n\n # Store the information about the file in the database.\n file_ = File(\n release=release,\n filename=filename,\n 
python_version=form.pyversion.data,\n packagetype=form.filetype.data,\n comment_text=form.comment.data,\n size=file_size,\n has_signature=bool(has_signature),\n md5_digest=file_hashes[\"md5\"],\n sha256_digest=file_hashes[\"sha256\"],\n blake2_256_digest=file_hashes[\"blake2_256\"],\n # Figure out what our filepath is going to be, we're going to use a\n # directory structure based on the hash of the file contents. This\n # will ensure that the contents of the file cannot change without\n # it also changing the path that the file is saved too.\n path=\"/\".join(\n [\n file_hashes[PATH_HASHER][:2],\n file_hashes[PATH_HASHER][2:4],\n file_hashes[PATH_HASHER][4:],\n filename,\n ]\n ),\n uploaded_via=request.user_agent,\n )\n request.db.add(file_)\n\n # TODO: This should be handled by some sort of database trigger or a\n # SQLAlchemy hook or the like instead of doing it inline in this\n # view.\n request.db.add(\n JournalEntry(\n name=release.project.name,\n version=release.version,\n action=\"add {python_version} file {filename}\".format(\n python_version=file_.python_version, filename=file_.filename\n ),\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n\n # TODO: We need a better answer about how to make this transactional so\n # this won't take affect until after a commit has happened, for\n # now we'll just ignore it and save it before the transaction is\n # committed.\n storage = request.find_service(IFileStorage)\n storage.store(\n file_.path,\n os.path.join(tmpdir, filename),\n meta={\n \"project\": file_.release.project.normalized_name,\n \"version\": file_.release.version,\n \"package-type\": file_.packagetype,\n \"python-version\": file_.python_version,\n },\n )\n if has_signature:\n storage.store(\n file_.pgp_path,\n os.path.join(tmpdir, filename + \".asc\"),\n meta={\n \"project\": file_.release.project.normalized_name,\n \"version\": file_.release.version,\n \"package-type\": file_.packagetype,\n \"python-version\": file_.python_version,\n },\n )\n\n # Log a successful upload\n metrics.increment(\"warehouse.upload.ok\", tags=[f\"filetype:{form.filetype.data}\"])\n\n return Response()\n\n\ndef _legacy_purge(status, *args, **kwargs):\n if status:\n requests.post(*args, **kwargs)\n\n\n@view_config(\n route_name=\"forklift.legacy.submit\", require_csrf=False, require_methods=[\"POST\"]\n)\n@view_config(\n route_name=\"forklift.legacy.submit_pkg_info\",\n require_csrf=False,\n require_methods=[\"POST\"],\n)\ndef submit(request):\n return _exc_with_message(\n HTTPGone,\n (\n \"Project pre-registration is no longer required or supported, \"\n \"upload your files instead.\"\n ),\n )\n\n\n@view_config(\n route_name=\"forklift.legacy.doc_upload\",\n require_csrf=False,\n require_methods=[\"POST\"],\n)\ndef doc_upload(request):\n return _exc_with_message(\n HTTPGone,\n \"Uploading documentation is no longer supported, we recommend using \"\n \"https://readthedocs.org/.\",\n )\n",
"path": "warehouse/forklift/legacy.py"
}
] | [
{
"content": "# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport email\nimport hashlib\nimport hmac\nimport os.path\nimport re\nimport tarfile\nimport tempfile\nimport zipfile\n\nfrom cgi import FieldStorage, parse_header\nfrom itertools import chain\n\nimport packaging.requirements\nimport packaging.specifiers\nimport packaging.utils\nimport packaging.version\nimport pkg_resources\nimport requests\nimport stdlib_list\nimport wtforms\nimport wtforms.validators\n\nfrom pyramid.httpexceptions import HTTPBadRequest, HTTPForbidden, HTTPGone\nfrom pyramid.response import Response\nfrom pyramid.view import view_config\nfrom sqlalchemy import exists, func, orm\nfrom sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound\n\nfrom warehouse import forms\nfrom warehouse.admin.squats import Squat\nfrom warehouse.classifiers.models import Classifier\nfrom warehouse.metrics import IMetricsService\nfrom warehouse.packaging.interfaces import IFileStorage\nfrom warehouse.packaging.models import (\n BlacklistedProject,\n Dependency,\n DependencyKind,\n Description,\n File,\n Filename,\n JournalEntry,\n Project,\n Release,\n Role,\n)\nfrom warehouse.utils import http, readme\n\nMAX_FILESIZE = 60 * 1024 * 1024 # 60M\nMAX_SIGSIZE = 8 * 1024 # 8K\n\nPATH_HASHER = \"blake2_256\"\n\n\ndef namespace_stdlib_list(module_list):\n for module_name in module_list:\n parts = module_name.split(\".\")\n for i, part in enumerate(parts):\n yield \".\".join(parts[: i + 1])\n\n\nSTDLIB_PROHIBITTED = {\n packaging.utils.canonicalize_name(s.rstrip(\"-_.\").lstrip(\"-_.\"))\n for s in chain.from_iterable(\n namespace_stdlib_list(stdlib_list.stdlib_list(version))\n for version in stdlib_list.short_versions\n )\n}\n\n# Wheel platform checking\n\n# Note: defining new platform ABI compatibility tags that don't\n# have a python.org binary release to anchor them is a\n# complex task that needs more than just OS+architecture info.\n# For Linux specifically, the platform ABI is defined by each\n# individual distro version, so wheels built on one version may\n# not even work on older versions of the same distro, let alone\n# a completely different distro.\n#\n# That means new entries should only be added given an\n# accompanying ABI spec that explains how to build a\n# compatible binary (see the manylinux specs as examples).\n\n# These platforms can be handled by a simple static list:\n_allowed_platforms = {\n \"any\",\n \"win32\",\n \"win_amd64\",\n \"win_ia64\",\n \"manylinux1_x86_64\",\n \"manylinux1_i686\",\n \"manylinux2010_x86_64\",\n \"manylinux2010_i686\",\n \"linux_armv6l\",\n \"linux_armv7l\",\n}\n# macosx is a little more complicated:\n_macosx_platform_re = re.compile(r\"macosx_10_(\\d+)+_(?P<arch>.*)\")\n_macosx_arches = {\n \"ppc\",\n \"ppc64\",\n \"i386\",\n \"x86_64\",\n \"intel\",\n \"fat\",\n \"fat32\",\n \"fat64\",\n \"universal\",\n}\n\n\n# Actual checking code;\ndef _valid_platform_tag(platform_tag):\n if platform_tag in _allowed_platforms:\n return True\n m = _macosx_platform_re.match(platform_tag)\n if m and 
m.group(\"arch\") in _macosx_arches:\n return True\n return False\n\n\n_error_message_order = [\"metadata_version\", \"name\", \"version\"]\n\n\n_dist_file_regexes = {\n # True/False is for legacy or not.\n True: re.compile(r\".+?\\.(exe|tar\\.gz|bz2|rpm|deb|zip|tgz|egg|dmg|msi|whl)$\", re.I),\n False: re.compile(r\".+?\\.(tar\\.gz|zip|whl|egg)$\", re.I),\n}\n\n\n_wheel_file_re = re.compile(\n r\"\"\"\n ^\n (?P<namever>(?P<name>.+?)(-(?P<ver>\\d.+?))?)\n (\n (-(?P<build>\\d.*?))?\n -(?P<pyver>.+?)\n -(?P<abi>.+?)\n -(?P<plat>.+?)\n (?:\\.whl|\\.dist-info)\n )\n $\n \"\"\",\n re.VERBOSE,\n)\n\n\n_project_name_re = re.compile(\n r\"^([A-Z0-9]|[A-Z0-9][A-Z0-9._-]*[A-Z0-9])$\", re.IGNORECASE\n)\n\n\n_legacy_specifier_re = re.compile(r\"^(?P<name>\\S+)(?: \\((?P<specifier>\\S+)\\))?$\")\n\n\n_valid_description_content_types = {\"text/plain\", \"text/x-rst\", \"text/markdown\"}\n\n_valid_markdown_variants = {\"CommonMark\", \"GFM\"}\n\n\ndef _exc_with_message(exc, message):\n # The crappy old API that PyPI offered uses the status to pass down\n # messages to the client. So this function will make that easier to do.\n resp = exc(message)\n resp.status = \"{} {}\".format(resp.status_code, message)\n return resp\n\n\ndef _validate_pep440_version(form, field):\n parsed = packaging.version.parse(field.data)\n\n # Check that this version is a valid PEP 440 version at all.\n if not isinstance(parsed, packaging.version.Version):\n raise wtforms.validators.ValidationError(\n \"Start and end with a letter or numeral containing only \"\n \"ASCII numeric and '.', '_' and '-'.\"\n )\n\n # Check that this version does not have a PEP 440 local segment attached\n # to it.\n if parsed.local is not None:\n raise wtforms.validators.ValidationError(\"Can't use PEP 440 local versions.\")\n\n\ndef _parse_legacy_requirement(requirement):\n parsed = _legacy_specifier_re.search(requirement)\n if parsed is None:\n raise ValueError(\"Invalid requirement.\")\n return parsed.groupdict()[\"name\"], parsed.groupdict()[\"specifier\"]\n\n\ndef _validate_pep440_specifier(specifier):\n try:\n packaging.specifiers.SpecifierSet(specifier)\n except packaging.specifiers.InvalidSpecifier:\n raise wtforms.validators.ValidationError(\n \"Invalid specifier in requirement.\"\n ) from None\n\n\ndef _validate_pep440_specifier_field(form, field):\n return _validate_pep440_specifier(field.data)\n\n\ndef _validate_legacy_non_dist_req(requirement):\n try:\n req = packaging.requirements.Requirement(requirement.replace(\"_\", \"\"))\n except packaging.requirements.InvalidRequirement:\n raise wtforms.validators.ValidationError(\n \"Invalid requirement: {!r}\".format(requirement)\n ) from None\n\n if req.url is not None:\n raise wtforms.validators.ValidationError(\n \"Can't direct dependency: {!r}\".format(requirement)\n )\n\n if any(\n not identifier.isalnum() or identifier[0].isdigit()\n for identifier in req.name.split(\".\")\n ):\n raise wtforms.validators.ValidationError(\"Use a valid Python identifier.\")\n\n\ndef _validate_legacy_non_dist_req_list(form, field):\n for datum in field.data:\n _validate_legacy_non_dist_req(datum)\n\n\ndef _validate_legacy_dist_req(requirement):\n try:\n req = packaging.requirements.Requirement(requirement)\n except packaging.requirements.InvalidRequirement:\n raise wtforms.validators.ValidationError(\n \"Invalid requirement: {!r}.\".format(requirement)\n ) from None\n\n if req.url is not None:\n raise wtforms.validators.ValidationError(\n \"Can't have direct dependency: {!r}\".format(requirement)\n )\n\n if 
any(packaging.version.Version(spec.version).local for spec in req.specifier):\n raise wtforms.validators.ValidationError(\n \"Can't have dependency with local version: {!r}\".format(requirement)\n )\n\n\ndef _validate_legacy_dist_req_list(form, field):\n for datum in field.data:\n _validate_legacy_dist_req(datum)\n\n\ndef _validate_requires_external(requirement):\n name, specifier = _parse_legacy_requirement(requirement)\n\n # TODO: Is it really reasonable to parse the specifier using PEP 440?\n if specifier is not None:\n _validate_pep440_specifier(specifier)\n\n\ndef _validate_requires_external_list(form, field):\n for datum in field.data:\n _validate_requires_external(datum)\n\n\ndef _validate_project_url(value):\n try:\n label, url = value.split(\", \", 1)\n except ValueError:\n raise wtforms.validators.ValidationError(\n \"Use both a label and an URL.\"\n ) from None\n\n if not label:\n raise wtforms.validators.ValidationError(\"Use a label.\")\n\n if len(label) > 32:\n raise wtforms.validators.ValidationError(\"Use 32 characters or less.\")\n\n if not url:\n raise wtforms.validators.ValidationError(\"Use an URL.\")\n\n if not http.is_valid_uri(url, require_authority=False):\n raise wtforms.validators.ValidationError(\"Use valid URL.\")\n\n\ndef _validate_project_url_list(form, field):\n for datum in field.data:\n _validate_project_url(datum)\n\n\ndef _validate_rfc822_email_field(form, field):\n email_validator = wtforms.validators.Email(message=\"Use a valid email address\")\n addresses = email.utils.getaddresses([field.data])\n\n for real_name, address in addresses:\n email_validator(form, type(\"field\", (), {\"data\": address}))\n\n\ndef _validate_description_content_type(form, field):\n def _raise(message):\n raise wtforms.validators.ValidationError(\n f\"Invalid description content type: {message}\"\n )\n\n content_type, parameters = parse_header(field.data)\n if content_type not in _valid_description_content_types:\n _raise(\"type/subtype is not valid\")\n\n charset = parameters.get(\"charset\")\n if charset and charset != \"UTF-8\":\n _raise(\"Use a valid charset\")\n\n variant = parameters.get(\"variant\")\n if (\n content_type == \"text/markdown\"\n and variant\n and variant not in _valid_markdown_variants\n ):\n _raise(\n \"Use a valid variant, expected one of {}\".format(\n \", \".join(_valid_markdown_variants)\n )\n )\n\n\ndef _construct_dependencies(form, types):\n for name, kind in types.items():\n for item in getattr(form, name).data:\n yield Dependency(kind=kind.value, specifier=item)\n\n\nclass ListField(wtforms.Field):\n def process_formdata(self, valuelist):\n self.data = [v.strip() for v in valuelist if v.strip()]\n\n\n# TODO: Eventually this whole validation thing should move to the packaging\n# library and we should just call that. 
However until PEP 426 is done\n# that library won't have an API for this.\nclass MetadataForm(forms.Form):\n\n # Metadata version\n metadata_version = wtforms.StringField(\n description=\"Metadata-Version\",\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.AnyOf(\n # Note: This isn't really Metadata 2.0, however bdist_wheel\n # claims it is producing a Metadata 2.0 metadata when in\n # reality it's more like 1.2 with some extensions.\n [\"1.0\", \"1.1\", \"1.2\", \"2.0\", \"2.1\"],\n message=\"Use a known metadata version.\",\n ),\n ],\n )\n\n # Identity Project and Release\n name = wtforms.StringField(\n description=\"Name\",\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.Regexp(\n _project_name_re,\n re.IGNORECASE,\n message=(\n \"Start and end with a letter or numeral containing \"\n \"only ASCII numeric and '.', '_' and '-'.\"\n ),\n ),\n ],\n )\n version = wtforms.StringField(\n description=\"Version\",\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.Regexp(\n r\"^(?!\\s).*(?<!\\s)$\",\n message=\"Can't have leading or trailing whitespace.\",\n ),\n _validate_pep440_version,\n ],\n )\n\n # Additional Release metadata\n summary = wtforms.StringField(\n description=\"Summary\",\n validators=[\n wtforms.validators.Optional(),\n wtforms.validators.Length(max=512),\n wtforms.validators.Regexp(\n r\"^.+$\", # Rely on the fact that . doesn't match a newline.\n message=\"Use a single line only.\",\n ),\n ],\n )\n description = wtforms.StringField(\n description=\"Description\", validators=[wtforms.validators.Optional()]\n )\n author = wtforms.StringField(\n description=\"Author\", validators=[wtforms.validators.Optional()]\n )\n description_content_type = wtforms.StringField(\n description=\"Description-Content-Type\",\n validators=[wtforms.validators.Optional(), _validate_description_content_type],\n )\n author_email = wtforms.StringField(\n description=\"Author-email\",\n validators=[wtforms.validators.Optional(), _validate_rfc822_email_field],\n )\n maintainer = wtforms.StringField(\n description=\"Maintainer\", validators=[wtforms.validators.Optional()]\n )\n maintainer_email = wtforms.StringField(\n description=\"Maintainer-email\",\n validators=[wtforms.validators.Optional(), _validate_rfc822_email_field],\n )\n license = wtforms.StringField(\n description=\"License\", validators=[wtforms.validators.Optional()]\n )\n keywords = wtforms.StringField(\n description=\"Keywords\", validators=[wtforms.validators.Optional()]\n )\n classifiers = wtforms.fields.SelectMultipleField(description=\"Classifier\")\n platform = wtforms.StringField(\n description=\"Platform\", validators=[wtforms.validators.Optional()]\n )\n\n # URLs\n home_page = wtforms.StringField(\n description=\"Home-Page\",\n validators=[wtforms.validators.Optional(), forms.URIValidator()],\n )\n download_url = wtforms.StringField(\n description=\"Download-URL\",\n validators=[wtforms.validators.Optional(), forms.URIValidator()],\n )\n\n # Dependency Information\n requires_python = wtforms.StringField(\n description=\"Requires-Python\",\n validators=[wtforms.validators.Optional(), _validate_pep440_specifier_field],\n )\n\n # File information\n pyversion = wtforms.StringField(validators=[wtforms.validators.Optional()])\n filetype = wtforms.StringField(\n validators=[\n wtforms.validators.DataRequired(),\n wtforms.validators.AnyOf(\n [\n \"bdist_dmg\",\n \"bdist_dumb\",\n \"bdist_egg\",\n \"bdist_msi\",\n \"bdist_rpm\",\n \"bdist_wheel\",\n 
\"bdist_wininst\",\n \"sdist\",\n ],\n message=\"Use a known file type.\",\n ),\n ]\n )\n comment = wtforms.StringField(validators=[wtforms.validators.Optional()])\n md5_digest = wtforms.StringField(validators=[wtforms.validators.Optional()])\n sha256_digest = wtforms.StringField(\n validators=[\n wtforms.validators.Optional(),\n wtforms.validators.Regexp(\n r\"^[A-F0-9]{64}$\",\n re.IGNORECASE,\n message=\"Use a valid, hex-encoded, SHA256 message digest.\",\n ),\n ]\n )\n blake2_256_digest = wtforms.StringField(\n validators=[\n wtforms.validators.Optional(),\n wtforms.validators.Regexp(\n r\"^[A-F0-9]{64}$\",\n re.IGNORECASE,\n message=\"Use a valid, hex-encoded, BLAKE2 message digest.\",\n ),\n ]\n )\n\n # Legacy dependency information\n requires = ListField(\n validators=[wtforms.validators.Optional(), _validate_legacy_non_dist_req_list]\n )\n provides = ListField(\n validators=[wtforms.validators.Optional(), _validate_legacy_non_dist_req_list]\n )\n obsoletes = ListField(\n validators=[wtforms.validators.Optional(), _validate_legacy_non_dist_req_list]\n )\n\n # Newer dependency information\n requires_dist = ListField(\n description=\"Requires-Dist\",\n validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list],\n )\n provides_dist = ListField(\n description=\"Provides-Dist\",\n validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list],\n )\n obsoletes_dist = ListField(\n description=\"Obsoletes-Dist\",\n validators=[wtforms.validators.Optional(), _validate_legacy_dist_req_list],\n )\n requires_external = ListField(\n description=\"Requires-External\",\n validators=[wtforms.validators.Optional(), _validate_requires_external_list],\n )\n\n # Newer metadata information\n project_urls = ListField(\n description=\"Project-URL\",\n validators=[wtforms.validators.Optional(), _validate_project_url_list],\n )\n\n def full_validate(self):\n # All non source releases *must* have a pyversion\n if (\n self.filetype.data\n and self.filetype.data != \"sdist\"\n and not self.pyversion.data\n ):\n raise wtforms.validators.ValidationError(\n \"Python version is required for binary distribution uploads.\"\n )\n\n # All source releases *must* have a pyversion of \"source\"\n if self.filetype.data == \"sdist\":\n if not self.pyversion.data:\n self.pyversion.data = \"source\"\n elif self.pyversion.data != \"source\":\n raise wtforms.validators.ValidationError(\n \"Use 'source' as Python version for an sdist.\"\n )\n\n # We *must* have at least one digest to verify against.\n if not self.md5_digest.data and not self.sha256_digest.data:\n raise wtforms.validators.ValidationError(\n \"Include at least one message digest.\"\n )\n\n\n_safe_zipnames = re.compile(r\"(purelib|platlib|headers|scripts|data).+\", re.I)\n# .tar uncompressed, .tar.gz .tgz, .tar.bz2 .tbz2\n_tar_filenames_re = re.compile(r\"\\.(?:tar$|t(?:ar\\.)?(?P<z_type>gz|bz2)$)\")\n\n\ndef _is_valid_dist_file(filename, filetype):\n \"\"\"\n Perform some basic checks to see whether the indicated file could be\n a valid distribution file.\n \"\"\"\n\n # If our file is a zipfile, then ensure that it's members are only\n # compressed with supported compression methods.\n if zipfile.is_zipfile(filename):\n with zipfile.ZipFile(filename) as zfp:\n for zinfo in zfp.infolist():\n if zinfo.compress_type not in {\n zipfile.ZIP_STORED,\n zipfile.ZIP_DEFLATED,\n }:\n return False\n\n tar_fn_match = _tar_filenames_re.search(filename)\n if tar_fn_match:\n # Ensure that this is a valid tar file, and that it contains PKG-INFO.\n 
z_type = tar_fn_match.group(\"z_type\") or \"\"\n try:\n with tarfile.open(filename, f\"r:{z_type}\") as tar:\n # This decompresses the entire stream to validate it and the\n # tar within. Easy CPU DoS attack. :/\n bad_tar = True\n member = tar.next()\n while member:\n parts = os.path.split(member.name)\n if len(parts) == 2 and parts[1] == \"PKG-INFO\":\n bad_tar = False\n member = tar.next()\n if bad_tar:\n return False\n except tarfile.ReadError:\n return False\n elif filename.endswith(\".exe\"):\n # The only valid filetype for a .exe file is \"bdist_wininst\".\n if filetype != \"bdist_wininst\":\n return False\n\n # Ensure that the .exe is a valid zip file, and that all of the files\n # contained within it have safe filenames.\n try:\n with zipfile.ZipFile(filename, \"r\") as zfp:\n # We need the no branch below to work around a bug in\n # coverage.py where it's detecting a missed branch where there\n # isn't one.\n for zipname in zfp.namelist(): # pragma: no branch\n if not _safe_zipnames.match(zipname):\n return False\n except zipfile.BadZipFile:\n return False\n elif filename.endswith(\".msi\"):\n # The only valid filetype for a .msi is \"bdist_msi\"\n if filetype != \"bdist_msi\":\n return False\n\n # Check the first 8 bytes of the MSI file. This was taken from the\n # legacy implementation of PyPI which itself took it from the\n # implementation of `file` I believe.\n with open(filename, \"rb\") as fp:\n if fp.read(8) != b\"\\xD0\\xCF\\x11\\xE0\\xA1\\xB1\\x1A\\xE1\":\n return False\n elif filename.endswith(\".zip\") or filename.endswith(\".egg\"):\n # Ensure that the .zip/.egg is a valid zip file, and that it has a\n # PKG-INFO file.\n try:\n with zipfile.ZipFile(filename, \"r\") as zfp:\n for zipname in zfp.namelist():\n parts = os.path.split(zipname)\n if len(parts) == 2 and parts[1] == \"PKG-INFO\":\n # We need the no branch below to work around a bug in\n # coverage.py where it's detecting a missed branch\n # where there isn't one.\n break # pragma: no branch\n else:\n return False\n except zipfile.BadZipFile:\n return False\n elif filename.endswith(\".whl\"):\n # Ensure that the .whl is a valid zip file, and that it has a WHEEL\n # file.\n try:\n with zipfile.ZipFile(filename, \"r\") as zfp:\n for zipname in zfp.namelist():\n parts = os.path.split(zipname)\n if len(parts) == 2 and parts[1] == \"WHEEL\":\n # We need the no branch below to work around a bug in\n # coverage.py where it's detecting a missed branch\n # where there isn't one.\n break # pragma: no branch\n else:\n return False\n except zipfile.BadZipFile:\n return False\n\n # If we haven't yet decided it's not valid, then we'll assume it is and\n # allow it.\n return True\n\n\ndef _is_duplicate_file(db_session, filename, hashes):\n \"\"\"\n Check to see if file already exists, and if it's content matches.\n A file is considered to exist if its filename *or* blake2 digest are\n present in a file row in the database.\n\n Returns:\n - True: This file is a duplicate and all further processing should halt.\n - False: This file exists, but it is not a duplicate.\n - None: This file does not exist.\n \"\"\"\n\n file_ = (\n db_session.query(File)\n .filter(\n (File.filename == filename)\n | (File.blake2_256_digest == hashes[\"blake2_256\"])\n )\n .first()\n )\n\n if file_ is not None:\n return (\n file_.filename == filename\n and file_.sha256_digest == hashes[\"sha256\"]\n and file_.md5_digest == hashes[\"md5\"]\n and file_.blake2_256_digest == hashes[\"blake2_256\"]\n )\n\n return None\n\n\ndef 
_no_deprecated_classifiers(request):\n deprecated_classifiers = {\n classifier.classifier\n for classifier in (\n request.db.query(Classifier.classifier)\n .filter(Classifier.deprecated.is_(True))\n .all()\n )\n }\n\n def validate_no_deprecated_classifiers(form, field):\n invalid_classifiers = set(field.data or []) & deprecated_classifiers\n if invalid_classifiers:\n first_invalid_classifier = sorted(invalid_classifiers)[0]\n host = request.registry.settings.get(\"warehouse.domain\")\n classifiers_url = request.route_url(\"classifiers\", _host=host)\n\n raise wtforms.validators.ValidationError(\n f\"Classifier {first_invalid_classifier!r} has been \"\n f\"deprecated, see {classifiers_url} for a list of valid \"\n \"classifiers.\"\n )\n\n return validate_no_deprecated_classifiers\n\n\n@view_config(\n route_name=\"forklift.legacy.file_upload\",\n uses_session=True,\n require_csrf=False,\n require_methods=[\"POST\"],\n)\ndef file_upload(request):\n # If we're in read-only mode, let upload clients know\n if request.flags.enabled(\"read-only\"):\n raise _exc_with_message(\n HTTPForbidden, \"Read-only mode: Uploads are temporarily disabled\"\n )\n\n # Log an attempt to upload\n metrics = request.find_service(IMetricsService, context=None)\n metrics.increment(\"warehouse.upload.attempt\")\n\n # Before we do anything, if there isn't an authenticated user with this\n # request, then we'll go ahead and bomb out.\n if request.authenticated_userid is None:\n raise _exc_with_message(\n HTTPForbidden, \"Invalid or non-existent authentication information.\"\n )\n\n # Ensure that user has a verified, primary email address. This should both\n # reduce the ease of spam account creation and activity, as well as act as\n # a forcing function for https://github.com/pypa/warehouse/issues/3632.\n # TODO: Once https://github.com/pypa/warehouse/issues/3632 has been solved,\n # we might consider a different condition, possibly looking at\n # User.is_active instead.\n if not (request.user.primary_email and request.user.primary_email.verified):\n raise _exc_with_message(\n HTTPBadRequest,\n (\n \"User {!r} does not have a verified primary email address. \"\n \"Please add a verified primary email before attempting to \"\n \"upload to PyPI. See {project_help} for more information.\"\n \"for more information.\"\n ).format(\n request.user.username,\n project_help=request.help_url(_anchor=\"verified-email\"),\n ),\n ) from None\n\n # Do some cleanup of the various form fields\n for key in list(request.POST):\n value = request.POST.get(key)\n if isinstance(value, str):\n # distutils \"helpfully\" substitutes unknown, but \"required\" values\n # with the string \"UNKNOWN\". This is basically never what anyone\n # actually wants so we'll just go ahead and delete anything whose\n # value is UNKNOWN.\n if value.strip() == \"UNKNOWN\":\n del request.POST[key]\n\n # Escape NUL characters, which psycopg doesn't like\n if \"\\x00\" in value:\n request.POST[key] = value.replace(\"\\x00\", \"\\\\x00\")\n\n # We require protocol_version 1, it's the only supported version however\n # passing a different version should raise an error.\n if request.POST.get(\"protocol_version\", \"1\") != \"1\":\n raise _exc_with_message(HTTPBadRequest, \"Unknown protocol version.\")\n\n # Check if any fields were supplied as a tuple and have become a\n # FieldStorage. 
The 'content' and 'gpg_signature' fields _should_ be a\n # FieldStorage, however.\n # ref: https://github.com/pypa/warehouse/issues/2185\n # ref: https://github.com/pypa/warehouse/issues/2491\n for field in set(request.POST) - {\"content\", \"gpg_signature\"}:\n values = request.POST.getall(field)\n if any(isinstance(value, FieldStorage) for value in values):\n raise _exc_with_message(HTTPBadRequest, f\"{field}: Should not be a tuple.\")\n\n # Look up all of the valid classifiers\n all_classifiers = request.db.query(Classifier).all()\n\n # Validate and process the incoming metadata.\n form = MetadataForm(request.POST)\n\n # Add a validator for deprecated classifiers\n form.classifiers.validators.append(_no_deprecated_classifiers(request))\n\n form.classifiers.choices = [(c.classifier, c.classifier) for c in all_classifiers]\n if not form.validate():\n for field_name in _error_message_order:\n if field_name in form.errors:\n break\n else:\n field_name = sorted(form.errors.keys())[0]\n\n if field_name in form:\n field = form[field_name]\n if field.description and isinstance(field, wtforms.StringField):\n error_message = (\n \"{value!r} is an invalid value for {field}. \".format(\n value=field.data, field=field.description\n )\n + \"Error: {} \".format(form.errors[field_name][0])\n + \"See \"\n \"https://packaging.python.org/specifications/core-metadata\"\n )\n else:\n error_message = \"Invalid value for {field}. Error: {msgs[0]}\".format(\n field=field_name, msgs=form.errors[field_name]\n )\n else:\n error_message = \"Error: {}\".format(form.errors[field_name][0])\n\n raise _exc_with_message(HTTPBadRequest, error_message)\n\n # Ensure that we have file data in the request.\n if \"content\" not in request.POST:\n raise _exc_with_message(HTTPBadRequest, \"Upload payload does not have a file.\")\n\n # Look up the project first before doing anything else, this is so we can\n # automatically register it if we need to and can check permissions before\n # going any further.\n try:\n project = (\n request.db.query(Project)\n .filter(\n Project.normalized_name == func.normalize_pep426_name(form.name.data)\n )\n .one()\n )\n except NoResultFound:\n # Check for AdminFlag set by a PyPI Administrator disabling new project\n # registration, reasons for this include Spammers, security\n # vulnerabilities, or just wanting to be lazy and not worry ;)\n if request.flags.enabled(\"disallow-new-project-registration\"):\n raise _exc_with_message(\n HTTPForbidden,\n (\n \"New project registration temporarily disabled. \"\n \"See {projecthelp} for details\"\n ).format(projecthelp=request.help_url(_anchor=\"admin-intervention\")),\n ) from None\n\n # Before we create the project, we're going to check our blacklist to\n # see if this project is even allowed to be registered. If it is not,\n # then we're going to deny the request to create this project.\n if request.db.query(\n exists().where(\n BlacklistedProject.name == func.normalize_pep426_name(form.name.data)\n )\n ).scalar():\n raise _exc_with_message(\n HTTPBadRequest,\n (\n \"The name {name!r} isn't allowed. 
\"\n \"See {projecthelp} \"\n \"for more information.\"\n ).format(\n name=form.name.data,\n projecthelp=request.help_url(_anchor=\"project-name\"),\n ),\n ) from None\n\n # Also check for collisions with Python Standard Library modules.\n if packaging.utils.canonicalize_name(form.name.data) in STDLIB_PROHIBITTED:\n raise _exc_with_message(\n HTTPBadRequest,\n (\n \"The name {name!r} isn't allowed (conflict with Python \"\n \"Standard Library module name). See \"\n \"{projecthelp} for more information.\"\n ).format(\n name=form.name.data,\n projecthelp=request.help_url(_anchor=\"project-name\"),\n ),\n ) from None\n\n # The project doesn't exist in our database, so first we'll check for\n # projects with a similar name\n squattees = (\n request.db.query(Project)\n .filter(\n func.levenshtein(\n Project.normalized_name, func.normalize_pep426_name(form.name.data)\n )\n <= 2\n )\n .all()\n )\n\n # Next we'll create the project\n project = Project(name=form.name.data)\n request.db.add(project)\n\n # Now that the project exists, add any squats which it is the squatter for\n for squattee in squattees:\n request.db.add(Squat(squatter=project, squattee=squattee))\n\n # Then we'll add a role setting the current user as the \"Owner\" of the\n # project.\n request.db.add(Role(user=request.user, project=project, role_name=\"Owner\"))\n # TODO: This should be handled by some sort of database trigger or a\n # SQLAlchemy hook or the like instead of doing it inline in this\n # view.\n request.db.add(\n JournalEntry(\n name=project.name,\n action=\"create\",\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n request.db.add(\n JournalEntry(\n name=project.name,\n action=\"add Owner {}\".format(request.user.username),\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n\n # Check that the user has permission to do things to this project, if this\n # is a new project this will act as a sanity check for the role we just\n # added above.\n if not request.has_permission(\"upload\", project):\n raise _exc_with_message(\n HTTPForbidden,\n (\n \"The credential associated with user '{0}' \"\n \"isn't allowed to upload to project '{1}'. \"\n \"See {2} for more information.\"\n ).format(\n request.user.username,\n project.name,\n request.help_url(_anchor=\"project-name\"),\n ),\n )\n\n # Update name if it differs but is still equivalent. 
We don't need to check if\n # they are equivalent when normalized because that's already been done when we\n # queried for the project.\n if project.name != form.name.data:\n project.name = form.name.data\n\n # Render our description so we can save from having to render this data every time\n # we load a project description page.\n rendered = None\n if form.description.data:\n description_content_type = form.description_content_type.data\n if not description_content_type:\n description_content_type = \"text/x-rst\"\n\n rendered = readme.render(\n form.description.data, description_content_type, use_fallback=False\n )\n\n # Uploading should prevent broken rendered descriptions.\n if rendered is None:\n if form.description_content_type.data:\n message = (\n \"The description failed to render \"\n \"for '{description_content_type}'.\"\n ).format(description_content_type=description_content_type)\n else:\n message = (\n \"The description failed to render \"\n \"in the default format of reStructuredText.\"\n )\n raise _exc_with_message(\n HTTPBadRequest,\n \"{message} See {projecthelp} for more information.\".format(\n message=message,\n projecthelp=request.help_url(_anchor=\"description-content-type\"),\n ),\n ) from None\n\n try:\n canonical_version = packaging.utils.canonicalize_version(form.version.data)\n release = (\n request.db.query(Release)\n .filter(\n (Release.project == project)\n & (Release.canonical_version == canonical_version)\n )\n .one()\n )\n except MultipleResultsFound:\n # There are multiple releases of this project which have the same\n # canonical version that were uploaded before we checked for\n # canonical version equivalence, so return the exact match instead\n release = (\n request.db.query(Release)\n .filter(\n (Release.project == project) & (Release.version == form.version.data)\n )\n .one()\n )\n except NoResultFound:\n release = Release(\n project=project,\n _classifiers=[\n c for c in all_classifiers if c.classifier in form.classifiers.data\n ],\n dependencies=list(\n _construct_dependencies(\n form,\n {\n \"requires\": DependencyKind.requires,\n \"provides\": DependencyKind.provides,\n \"obsoletes\": DependencyKind.obsoletes,\n \"requires_dist\": DependencyKind.requires_dist,\n \"provides_dist\": DependencyKind.provides_dist,\n \"obsoletes_dist\": DependencyKind.obsoletes_dist,\n \"requires_external\": DependencyKind.requires_external,\n \"project_urls\": DependencyKind.project_url,\n },\n )\n ),\n canonical_version=canonical_version,\n description=Description(\n content_type=form.description_content_type.data,\n raw=form.description.data or \"\",\n html=rendered or \"\",\n rendered_by=readme.renderer_version(),\n ),\n **{\n k: getattr(form, k).data\n for k in {\n # This is a list of all the fields in the form that we\n # should pull off and insert into our new release.\n \"version\",\n \"summary\",\n \"license\",\n \"author\",\n \"author_email\",\n \"maintainer\",\n \"maintainer_email\",\n \"keywords\",\n \"platform\",\n \"home_page\",\n \"download_url\",\n \"requires_python\",\n }\n },\n uploader=request.user,\n uploaded_via=request.user_agent,\n )\n request.db.add(release)\n # TODO: This should be handled by some sort of database trigger or\n # a SQLAlchemy hook or the like instead of doing it inline in\n # this view.\n request.db.add(\n JournalEntry(\n name=release.project.name,\n version=release.version,\n action=\"new release\",\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n\n # TODO: We need a better solution to this than to 
just do it inline inside\n # this method. Ideally the version field would just be sortable, but\n # at least this should be some sort of hook or trigger.\n releases = (\n request.db.query(Release)\n .filter(Release.project == project)\n .options(orm.load_only(Release._pypi_ordering))\n .all()\n )\n for i, r in enumerate(\n sorted(releases, key=lambda x: packaging.version.parse(x.version))\n ):\n r._pypi_ordering = i\n\n # Pull the filename out of our POST data.\n filename = request.POST[\"content\"].filename\n\n # Make sure that the filename does not contain any path separators.\n if \"/\" in filename or \"\\\\\" in filename:\n raise _exc_with_message(\n HTTPBadRequest, \"Cannot upload a file with '/' or '\\\\' in the name.\"\n )\n\n # Make sure the filename ends with an allowed extension.\n if _dist_file_regexes[project.allow_legacy_files].search(filename) is None:\n raise _exc_with_message(\n HTTPBadRequest,\n \"Invalid file extension: Use .egg, .tar.gz, .whl or .zip \"\n \"extension. (https://www.python.org/dev/peps/pep-0527)\",\n )\n\n # Make sure that our filename matches the project that it is being uploaded\n # to.\n prefix = pkg_resources.safe_name(project.name).lower()\n if not pkg_resources.safe_name(filename).lower().startswith(prefix):\n raise _exc_with_message(\n HTTPBadRequest,\n \"Start filename for {!r} with {!r}.\".format(project.name, prefix),\n )\n\n # Check the content type of what is being uploaded\n if not request.POST[\"content\"].type or request.POST[\"content\"].type.startswith(\n \"image/\"\n ):\n raise _exc_with_message(HTTPBadRequest, \"Invalid distribution file.\")\n\n # Ensure that the package filetype is allowed.\n # TODO: Once PEP 527 is completely implemented we should be able to delete\n # this and just move it into the form itself.\n if not project.allow_legacy_files and form.filetype.data not in {\n \"sdist\",\n \"bdist_wheel\",\n \"bdist_egg\",\n }:\n raise _exc_with_message(HTTPBadRequest, \"Unknown type of file.\")\n\n # The project may or may not have a file size specified on the project, if\n # it does then it may or may not be smaller or larger than our global file\n # size limits.\n file_size_limit = max(filter(None, [MAX_FILESIZE, project.upload_limit]))\n\n with tempfile.TemporaryDirectory() as tmpdir:\n temporary_filename = os.path.join(tmpdir, filename)\n\n # Buffer the entire file onto disk, checking the hash of the file as we\n # go along.\n with open(temporary_filename, \"wb\") as fp:\n file_size = 0\n file_hashes = {\n \"md5\": hashlib.md5(),\n \"sha256\": hashlib.sha256(),\n \"blake2_256\": hashlib.blake2b(digest_size=256 // 8),\n }\n for chunk in iter(lambda: request.POST[\"content\"].file.read(8096), b\"\"):\n file_size += len(chunk)\n if file_size > file_size_limit:\n raise _exc_with_message(\n HTTPBadRequest,\n \"File too large. \"\n + \"Limit for project {name!r} is {limit} MB. \".format(\n name=project.name, limit=file_size_limit // (1024 * 1024)\n )\n + \"See \"\n + request.help_url(_anchor=\"file-size-limit\"),\n )\n fp.write(chunk)\n for hasher in file_hashes.values():\n hasher.update(chunk)\n\n # Take our hash functions and compute the final hashes for them now.\n file_hashes = {k: h.hexdigest().lower() for k, h in file_hashes.items()}\n\n # Actually verify the digests that we've gotten. We're going to use\n # hmac.compare_digest even though we probably don't actually need to\n # because it's better safe than sorry. 
In the case of multiple digests\n # we expect them all to be given.\n if not all(\n [\n hmac.compare_digest(\n getattr(form, \"{}_digest\".format(digest_name)).data.lower(),\n digest_value,\n )\n for digest_name, digest_value in file_hashes.items()\n if getattr(form, \"{}_digest\".format(digest_name)).data\n ]\n ):\n raise _exc_with_message(\n HTTPBadRequest,\n \"The digest supplied does not match a digest calculated \"\n \"from the uploaded file.\",\n )\n\n # Check to see if the file that was uploaded exists already or not.\n is_duplicate = _is_duplicate_file(request.db, filename, file_hashes)\n if is_duplicate:\n return Response()\n elif is_duplicate is not None:\n raise _exc_with_message(\n HTTPBadRequest,\n # Note: Changing this error message to something that doesn't\n # start with \"File already exists\" will break the\n # --skip-existing functionality in twine\n # ref: https://github.com/pypa/warehouse/issues/3482\n # ref: https://github.com/pypa/twine/issues/332\n \"File already exists. See \"\n + request.help_url(_anchor=\"file-name-reuse\"),\n )\n\n # Check to see if the file that was uploaded exists in our filename log\n if request.db.query(\n request.db.query(Filename).filter(Filename.filename == filename).exists()\n ).scalar():\n raise _exc_with_message(\n HTTPBadRequest,\n \"This filename has already been used, use a \"\n \"different version. \"\n \"See \" + request.help_url(_anchor=\"file-name-reuse\"),\n )\n\n # Check to see if uploading this file would create a duplicate sdist\n # for the current release.\n if (\n form.filetype.data == \"sdist\"\n and request.db.query(\n request.db.query(File)\n .filter((File.release == release) & (File.packagetype == \"sdist\"))\n .exists()\n ).scalar()\n ):\n raise _exc_with_message(\n HTTPBadRequest, \"Only one sdist may be uploaded per release.\"\n )\n\n # Check the file to make sure it is a valid distribution file.\n if not _is_valid_dist_file(temporary_filename, form.filetype.data):\n raise _exc_with_message(HTTPBadRequest, \"Invalid distribution file.\")\n\n # Check that if it's a binary wheel, it's on a supported platform\n if filename.endswith(\".whl\"):\n wheel_info = _wheel_file_re.match(filename)\n plats = wheel_info.group(\"plat\").split(\".\")\n for plat in plats:\n if not _valid_platform_tag(plat):\n raise _exc_with_message(\n HTTPBadRequest,\n \"Binary wheel '{filename}' has an unsupported \"\n \"platform tag '{plat}'.\".format(filename=filename, plat=plat),\n )\n\n # Also buffer the entire signature file to disk.\n if \"gpg_signature\" in request.POST:\n has_signature = True\n with open(os.path.join(tmpdir, filename + \".asc\"), \"wb\") as fp:\n signature_size = 0\n for chunk in iter(\n lambda: request.POST[\"gpg_signature\"].file.read(8096), b\"\"\n ):\n signature_size += len(chunk)\n if signature_size > MAX_SIGSIZE:\n raise _exc_with_message(HTTPBadRequest, \"Signature too large.\")\n fp.write(chunk)\n\n # Check whether signature is ASCII armored\n with open(os.path.join(tmpdir, filename + \".asc\"), \"rb\") as fp:\n if not fp.read().startswith(b\"-----BEGIN PGP SIGNATURE-----\"):\n raise _exc_with_message(\n HTTPBadRequest, \"PGP signature isn't ASCII armored.\"\n )\n else:\n has_signature = False\n\n # TODO: This should be handled by some sort of database trigger or a\n # SQLAlchemy hook or the like instead of doing it inline in this\n # view.\n request.db.add(Filename(filename=filename))\n\n # Store the information about the file in the database.\n file_ = File(\n release=release,\n filename=filename,\n 
python_version=form.pyversion.data,\n packagetype=form.filetype.data,\n comment_text=form.comment.data,\n size=file_size,\n has_signature=bool(has_signature),\n md5_digest=file_hashes[\"md5\"],\n sha256_digest=file_hashes[\"sha256\"],\n blake2_256_digest=file_hashes[\"blake2_256\"],\n # Figure out what our filepath is going to be, we're going to use a\n # directory structure based on the hash of the file contents. This\n # will ensure that the contents of the file cannot change without\n # it also changing the path that the file is saved too.\n path=\"/\".join(\n [\n file_hashes[PATH_HASHER][:2],\n file_hashes[PATH_HASHER][2:4],\n file_hashes[PATH_HASHER][4:],\n filename,\n ]\n ),\n uploaded_via=request.user_agent,\n )\n request.db.add(file_)\n\n # TODO: This should be handled by some sort of database trigger or a\n # SQLAlchemy hook or the like instead of doing it inline in this\n # view.\n request.db.add(\n JournalEntry(\n name=release.project.name,\n version=release.version,\n action=\"add {python_version} file {filename}\".format(\n python_version=file_.python_version, filename=file_.filename\n ),\n submitted_by=request.user,\n submitted_from=request.remote_addr,\n )\n )\n\n # TODO: We need a better answer about how to make this transactional so\n # this won't take affect until after a commit has happened, for\n # now we'll just ignore it and save it before the transaction is\n # committed.\n storage = request.find_service(IFileStorage)\n storage.store(\n file_.path,\n os.path.join(tmpdir, filename),\n meta={\n \"project\": file_.release.project.normalized_name,\n \"version\": file_.release.version,\n \"package-type\": file_.packagetype,\n \"python-version\": file_.python_version,\n },\n )\n if has_signature:\n storage.store(\n file_.pgp_path,\n os.path.join(tmpdir, filename + \".asc\"),\n meta={\n \"project\": file_.release.project.normalized_name,\n \"version\": file_.release.version,\n \"package-type\": file_.packagetype,\n \"python-version\": file_.python_version,\n },\n )\n\n # Log a successful upload\n metrics.increment(\"warehouse.upload.ok\", tags=[f\"filetype:{form.filetype.data}\"])\n\n return Response()\n\n\ndef _legacy_purge(status, *args, **kwargs):\n if status:\n requests.post(*args, **kwargs)\n\n\n@view_config(\n route_name=\"forklift.legacy.submit\", require_csrf=False, require_methods=[\"POST\"]\n)\n@view_config(\n route_name=\"forklift.legacy.submit_pkg_info\",\n require_csrf=False,\n require_methods=[\"POST\"],\n)\ndef submit(request):\n return _exc_with_message(\n HTTPGone,\n (\n \"Project pre-registration is no longer required or supported, \"\n \"upload your files instead.\"\n ),\n )\n\n\n@view_config(\n route_name=\"forklift.legacy.doc_upload\",\n require_csrf=False,\n require_methods=[\"POST\"],\n)\ndef doc_upload(request):\n return _exc_with_message(\n HTTPGone,\n \"Uploading documentation is no longer supported, we recommend using \"\n \"https://readthedocs.org/.\",\n )\n",
"path": "warehouse/forklift/legacy.py"
}
] | diff --git a/tests/unit/forklift/test_legacy.py b/tests/unit/forklift/test_legacy.py
index 36d3378cb7b3..003610d8d508 100644
--- a/tests/unit/forklift/test_legacy.py
+++ b/tests/unit/forklift/test_legacy.py
@@ -170,6 +170,7 @@ def test_validate_legacy_dist_req_valid(self, requirement):
"_foo",
"_foo (>=1.0)",
"name @ https://github.com/pypa",
+ "test-pypi-version-specifier-dep==0.0.1+cuda9",
],
)
def test_validate_legacy_dist_req_invalid(self, requirement):
diff --git a/warehouse/forklift/legacy.py b/warehouse/forklift/legacy.py
index 74d6dfe4d432..f4a33b20a130 100644
--- a/warehouse/forklift/legacy.py
+++ b/warehouse/forklift/legacy.py
@@ -252,6 +252,11 @@ def _validate_legacy_dist_req(requirement):
"Can't have direct dependency: {!r}".format(requirement)
)
+ if any(packaging.version.Version(spec.version).local for spec in req.specifier):
+ raise wtforms.validators.ValidationError(
+ "Can't have dependency with local version: {!r}".format(requirement)
+ )
+
def _validate_legacy_dist_req_list(form, field):
for datum in field.data:
| PyPI accepts packages with dependencies on local versions (e.g., 0.1.0+local)
PyPI accepts packages with dependencies on local versions (e.g., 0.1.0+local). I'm not sure if this is intentional or not, since PyPI will reject packages whose version is a local version.
I verified that this is the case using the following test package:
```
import setuptools
import os
import re
setuptools.setup(
    name="test-pypi-version-specifier-main",
    version="0.0.2",
    author="Edward Z. Yang",
    author_email="[email protected]",
    description="Testing package",
    long_description="Yarr",
    long_description_content_type="text/markdown",
    url="https://localhost/",
    packages=setuptools.find_packages(),
    include_package_data=True,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    install_requires=[
        "test-pypi-version-specifier-dep==0.0.1+cuda9",
    ],
)
```
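
Not part of the original report, but for illustration here is a minimal sketch of the check the diff above adds, using only the `packaging` library (the same calls the diff relies on): a requirement is flagged when any of its version specifiers carries a local version segment.

```python
from packaging.requirements import Requirement
from packaging.version import Version

req = Requirement("test-pypi-version-specifier-dep==0.0.1+cuda9")

# "0.0.1+cuda9" carries the local segment "cuda9", so Version(...).local is
# truthy and the upload should be rejected with a validation error.
has_local_version = any(Version(spec.version).local for spec in req.specifier)
print(has_local_version)  # True
```

A plain pin such as `foo==1.0` has no local segment (`Version("1.0").local` is `None`), so ordinary dependencies still pass the check.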
|
pyqtgraph__pyqtgraph-2888 | [
{
"content": "import warnings\n\nfrom ..Qt import QtCore, QtGui, QtWidgets\nfrom ..widgets.VerticalLabel import VerticalLabel\nfrom .DockDrop import DockDrop\n\n\nclass Dock(QtWidgets.QWidget):\n\n sigStretchChanged = QtCore.Signal()\n sigClosed = QtCore.Signal(object)\n\n def __init__(self, name, area=None, size=(10, 10), widget=None, hideTitle=False, autoOrientation=True, label=None, **kargs):\n QtWidgets.QWidget.__init__(self)\n self.dockdrop = DockDrop(self)\n self._container = None\n self._name = name\n self.area = area\n self.label = label\n if self.label is None:\n self.label = DockLabel(name, **kargs)\n self.label.dock = self\n if self.label.isClosable():\n self.label.sigCloseClicked.connect(self.close)\n self.labelHidden = False\n self.moveLabel = True ## If false, the dock is no longer allowed to move the label.\n self.autoOrient = autoOrientation\n self.orientation = 'horizontal'\n #self.label.setAlignment(QtCore.Qt.AlignmentFlag.AlignHCenter)\n self.topLayout = QtWidgets.QGridLayout()\n self.topLayout.setContentsMargins(0, 0, 0, 0)\n self.topLayout.setSpacing(0)\n self.setLayout(self.topLayout)\n self.topLayout.addWidget(self.label, 0, 1)\n self.widgetArea = QtWidgets.QWidget()\n self.topLayout.addWidget(self.widgetArea, 1, 1)\n self.layout = QtWidgets.QGridLayout()\n self.layout.setContentsMargins(0, 0, 0, 0)\n self.layout.setSpacing(0)\n self.widgetArea.setLayout(self.layout)\n self.widgetArea.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Expanding)\n self.widgets = []\n self.currentRow = 0\n #self.titlePos = 'top'\n self.dockdrop.raiseOverlay()\n self.hStyle = \"\"\"\n Dock > QWidget {\n border: 1px solid #000;\n border-radius: 5px;\n border-top-left-radius: 0px;\n border-top-right-radius: 0px;\n border-top-width: 0px;\n }\"\"\"\n self.vStyle = \"\"\"\n Dock > QWidget {\n border: 1px solid #000;\n border-radius: 5px;\n border-top-left-radius: 0px;\n border-bottom-left-radius: 0px;\n border-left-width: 0px;\n }\"\"\"\n self.nStyle = \"\"\"\n Dock > QWidget {\n border: 1px solid #000;\n border-radius: 5px;\n }\"\"\"\n self.dragStyle = \"\"\"\n Dock > QWidget {\n border: 4px solid #00F;\n border-radius: 5px;\n }\"\"\"\n self.setAutoFillBackground(False)\n self.widgetArea.setStyleSheet(self.hStyle)\n\n self.setStretch(*size)\n\n if widget is not None:\n self.addWidget(widget)\n\n if hideTitle:\n self.hideTitleBar()\n\n def implements(self, name=None):\n if name is None:\n return ['dock']\n else:\n return name == 'dock'\n\n def setStretch(self, x=None, y=None):\n \"\"\"\n Set the 'target' size for this Dock.\n The actual size will be determined by comparing this Dock's\n stretch value to the rest of the docks it shares space with.\n \"\"\"\n if x is None:\n x = 0\n if y is None:\n y = 0\n self._stretch = (x, y)\n self.sigStretchChanged.emit()\n \n def stretch(self):\n return self._stretch\n\n def hideTitleBar(self):\n \"\"\"\n Hide the title bar for this Dock.\n This will prevent the Dock being moved by the user.\n \"\"\"\n self.label.hide()\n self.labelHidden = True\n self.dockdrop.removeAllowedArea('center')\n self.updateStyle()\n\n def showTitleBar(self):\n \"\"\"\n Show the title bar for this Dock.\n \"\"\"\n self.label.show()\n self.labelHidden = False\n self.dockdrop.addAllowedArea('center')\n self.updateStyle()\n\n def title(self):\n \"\"\"\n Gets the text displayed in the title bar for this dock.\n \"\"\"\n return self.label.text()\n\n def setTitle(self, text):\n \"\"\"\n Sets the text displayed in title bar for this Dock.\n \"\"\"\n 
self.label.setText(text)\n\n def setOrientation(self, o='auto', force=False):\n \"\"\"\n Sets the orientation of the title bar for this Dock.\n Must be one of 'auto', 'horizontal', or 'vertical'.\n By default ('auto'), the orientation is determined\n based on the aspect ratio of the Dock.\n \"\"\"\n # setOrientation may be called before the container is set in some cases\n # (via resizeEvent), so there's no need to do anything here until called\n # again by containerChanged\n if self.container() is None:\n return\n\n if o == 'auto' and self.autoOrient:\n if self.container().type() == 'tab':\n o = 'horizontal'\n elif self.width() > self.height()*1.5:\n o = 'vertical'\n else:\n o = 'horizontal'\n if force or self.orientation != o:\n self.orientation = o\n self.label.setOrientation(o)\n self.updateStyle()\n\n def updateStyle(self):\n ## updates orientation and appearance of title bar\n if self.labelHidden:\n self.widgetArea.setStyleSheet(self.nStyle)\n elif self.orientation == 'vertical':\n self.label.setOrientation('vertical')\n if self.moveLabel:\n self.topLayout.addWidget(self.label, 1, 0)\n self.widgetArea.setStyleSheet(self.vStyle)\n else:\n self.label.setOrientation('horizontal')\n if self.moveLabel:\n self.topLayout.addWidget(self.label, 0, 1)\n self.widgetArea.setStyleSheet(self.hStyle)\n\n def resizeEvent(self, ev):\n self.setOrientation()\n self.dockdrop.resizeOverlay(self.size())\n\n def name(self):\n return self._name\n\n def addWidget(self, widget, row=None, col=0, rowspan=1, colspan=1):\n \"\"\"\n Add a new widget to the interior of this Dock.\n Each Dock uses a QGridLayout to arrange widgets within.\n \"\"\"\n if row is None:\n row = self.currentRow\n self.currentRow = max(row+1, self.currentRow)\n self.widgets.append(widget)\n self.layout.addWidget(widget, row, col, rowspan, colspan)\n self.dockdrop.raiseOverlay()\n \n def startDrag(self):\n self.drag = QtGui.QDrag(self)\n mime = QtCore.QMimeData()\n self.drag.setMimeData(mime)\n self.widgetArea.setStyleSheet(self.dragStyle)\n self.update()\n action = self.drag.exec() if hasattr(self.drag, 'exec') else self.drag.exec_()\n self.updateStyle()\n\n def float(self):\n self.area.floatDock(self)\n \n def container(self):\n return self._container\n\n def containerChanged(self, c):\n if self._container is not None:\n # ask old container to close itself if it is no longer needed\n self._container.apoptose()\n self._container = c\n if c is None:\n self.area = None\n else:\n self.area = c.area\n if c.type() != 'tab':\n self.moveLabel = True\n self.label.setDim(False)\n else:\n self.moveLabel = False\n \n self.setOrientation(force=True)\n\n def raiseDock(self):\n \"\"\"If this Dock is stacked underneath others, raise it to the top.\"\"\"\n self.container().raiseDock(self)\n\n def close(self):\n \"\"\"Remove this dock from the DockArea it lives inside.\"\"\"\n if self._container is None:\n warnings.warn(f\"Cannot close dock {self} because it is not open.\", RuntimeWarning, stacklevel=2)\n return\n\n self.setParent(None)\n QtWidgets.QLabel.close(self.label)\n self.label.setParent(None)\n self._container.apoptose()\n self._container = None\n self.sigClosed.emit(self)\n\n def __repr__(self):\n return \"<Dock %s %s>\" % (self.name(), self.stretch())\n\n def dragEnterEvent(self, *args):\n self.dockdrop.dragEnterEvent(*args)\n\n def dragMoveEvent(self, *args):\n self.dockdrop.dragMoveEvent(*args)\n\n def dragLeaveEvent(self, *args):\n self.dockdrop.dragLeaveEvent(*args)\n\n def dropEvent(self, *args):\n self.dockdrop.dropEvent(*args)\n\n\nclass 
DockLabel(VerticalLabel):\n\n sigClicked = QtCore.Signal(object, object)\n sigCloseClicked = QtCore.Signal()\n\n def __init__(self, text, closable=False, fontSize=\"12px\"):\n self.dim = False\n self.fixedWidth = False\n self.fontSize = fontSize\n VerticalLabel.__init__(self, text, orientation='horizontal', forceWidth=False)\n self.setAlignment(QtCore.Qt.AlignmentFlag.AlignTop|QtCore.Qt.AlignmentFlag.AlignHCenter)\n self.dock = None\n self.updateStyle()\n self.setAutoFillBackground(False)\n self.mouseMoved = False\n\n self.closeButton = None\n if closable:\n self.closeButton = QtWidgets.QToolButton(self)\n self.closeButton.clicked.connect(self.sigCloseClicked)\n self.closeButton.setIcon(QtWidgets.QApplication.style().standardIcon(QtWidgets.QStyle.StandardPixmap.SP_TitleBarCloseButton))\n\n def updateStyle(self):\n r = '3px'\n if self.dim:\n fg = '#aaa'\n bg = '#44a'\n border = '#339'\n else:\n fg = '#fff'\n bg = '#66c'\n border = '#55B'\n\n if self.orientation == 'vertical':\n self.vStyle = \"\"\"DockLabel {\n background-color : %s;\n color : %s;\n border-top-right-radius: 0px;\n border-top-left-radius: %s;\n border-bottom-right-radius: 0px;\n border-bottom-left-radius: %s;\n border-width: 0px;\n border-right: 2px solid %s;\n padding-top: 3px;\n padding-bottom: 3px;\n font-size: %s;\n }\"\"\" % (bg, fg, r, r, border, self.fontSize)\n self.setStyleSheet(self.vStyle)\n else:\n self.hStyle = \"\"\"DockLabel {\n background-color : %s;\n color : %s;\n border-top-right-radius: %s;\n border-top-left-radius: %s;\n border-bottom-right-radius: 0px;\n border-bottom-left-radius: 0px;\n border-width: 0px;\n border-bottom: 2px solid %s;\n padding-left: 3px;\n padding-right: 3px;\n font-size: %s;\n }\"\"\" % (bg, fg, r, r, border, self.fontSize)\n self.setStyleSheet(self.hStyle)\n\n def setDim(self, d):\n if self.dim != d:\n self.dim = d\n self.updateStyle()\n\n def setOrientation(self, o):\n VerticalLabel.setOrientation(self, o)\n self.updateStyle()\n\n def isClosable(self):\n return self.closeButton is not None\n\n def mousePressEvent(self, ev):\n lpos = ev.position() if hasattr(ev, 'position') else ev.localPos()\n self.pressPos = lpos\n self.mouseMoved = False\n ev.accept()\n\n def mouseMoveEvent(self, ev):\n if not self.mouseMoved:\n lpos = ev.position() if hasattr(ev, 'position') else ev.localPos()\n self.mouseMoved = (lpos - self.pressPos).manhattanLength() > QtWidgets.QApplication.startDragDistance()\n\n if self.mouseMoved and ev.buttons() == QtCore.Qt.MouseButton.LeftButton:\n self.dock.startDrag()\n ev.accept()\n\n def mouseReleaseEvent(self, ev):\n ev.accept()\n if not self.mouseMoved:\n self.sigClicked.emit(self, ev)\n\n def mouseDoubleClickEvent(self, ev):\n if ev.button() == QtCore.Qt.MouseButton.LeftButton:\n self.dock.float()\n\n def resizeEvent (self, ev):\n if self.closeButton:\n if self.orientation == 'vertical':\n size = ev.size().width()\n pos = QtCore.QPoint(0, 0)\n else:\n size = ev.size().height()\n pos = QtCore.QPoint(ev.size().width() - size, 0)\n self.closeButton.setFixedSize(QtCore.QSize(size, size))\n self.closeButton.move(pos)\n super(DockLabel,self).resizeEvent(ev)\n",
"path": "pyqtgraph/dockarea/Dock.py"
}
] | [
{
"content": "import warnings\n\nfrom ..Qt import QtCore, QtGui, QtWidgets\nfrom ..widgets.VerticalLabel import VerticalLabel\nfrom .DockDrop import DockDrop\n\n\nclass Dock(QtWidgets.QWidget):\n\n sigStretchChanged = QtCore.Signal()\n sigClosed = QtCore.Signal(object)\n\n def __init__(self, name, area=None, size=(10, 10), widget=None, hideTitle=False, autoOrientation=True, label=None, **kargs):\n QtWidgets.QWidget.__init__(self)\n self.dockdrop = DockDrop(self)\n self._container = None\n self._name = name\n self.area = area\n self.label = label\n if self.label is None:\n self.label = DockLabel(name, **kargs)\n self.label.dock = self\n if self.label.isClosable():\n self.label.sigCloseClicked.connect(self.close)\n self.labelHidden = False\n self.moveLabel = True ## If false, the dock is no longer allowed to move the label.\n self.autoOrient = autoOrientation\n self.orientation = 'horizontal'\n #self.label.setAlignment(QtCore.Qt.AlignmentFlag.AlignHCenter)\n self.topLayout = QtWidgets.QGridLayout()\n self.topLayout.setContentsMargins(0, 0, 0, 0)\n self.topLayout.setSpacing(0)\n self.setLayout(self.topLayout)\n self.topLayout.addWidget(self.label, 0, 1)\n self.widgetArea = QtWidgets.QWidget()\n self.topLayout.addWidget(self.widgetArea, 1, 1)\n self.layout = QtWidgets.QGridLayout()\n self.layout.setContentsMargins(0, 0, 0, 0)\n self.layout.setSpacing(0)\n self.widgetArea.setLayout(self.layout)\n self.widgetArea.setSizePolicy(QtWidgets.QSizePolicy.Policy.Expanding, QtWidgets.QSizePolicy.Policy.Expanding)\n self.widgets = []\n self.currentRow = 0\n #self.titlePos = 'top'\n self.dockdrop.raiseOverlay()\n self.hStyle = \"\"\"\n Dock > QWidget {\n border: 1px solid #000;\n border-radius: 5px;\n border-top-left-radius: 0px;\n border-top-right-radius: 0px;\n border-top-width: 0px;\n }\"\"\"\n self.vStyle = \"\"\"\n Dock > QWidget {\n border: 1px solid #000;\n border-radius: 5px;\n border-top-left-radius: 0px;\n border-bottom-left-radius: 0px;\n border-left-width: 0px;\n }\"\"\"\n self.nStyle = \"\"\"\n Dock > QWidget {\n border: 1px solid #000;\n border-radius: 5px;\n }\"\"\"\n self.dragStyle = \"\"\"\n Dock > QWidget {\n border: 4px solid #00F;\n border-radius: 5px;\n }\"\"\"\n self.setAutoFillBackground(False)\n self.widgetArea.setStyleSheet(self.hStyle)\n\n self.setStretch(*size)\n\n if widget is not None:\n self.addWidget(widget)\n\n if hideTitle:\n self.hideTitleBar()\n\n def implements(self, name=None):\n if name is None:\n return ['dock']\n else:\n return name == 'dock'\n\n def setStretch(self, x=None, y=None):\n \"\"\"\n Set the 'target' size for this Dock.\n The actual size will be determined by comparing this Dock's\n stretch value to the rest of the docks it shares space with.\n \"\"\"\n if x is None:\n x = 0\n if y is None:\n y = 0\n self._stretch = (x, y)\n self.sigStretchChanged.emit()\n \n def stretch(self):\n return self._stretch\n\n def hideTitleBar(self):\n \"\"\"\n Hide the title bar for this Dock.\n This will prevent the Dock being moved by the user.\n \"\"\"\n self.label.hide()\n self.labelHidden = True\n self.dockdrop.removeAllowedArea('center')\n self.updateStyle()\n\n def showTitleBar(self):\n \"\"\"\n Show the title bar for this Dock.\n \"\"\"\n self.label.show()\n self.labelHidden = False\n self.dockdrop.addAllowedArea('center')\n self.updateStyle()\n\n def title(self):\n \"\"\"\n Gets the text displayed in the title bar for this dock.\n \"\"\"\n return self.label.text()\n\n def setTitle(self, text):\n \"\"\"\n Sets the text displayed in title bar for this Dock.\n \"\"\"\n 
self.label.setText(text)\n\n def setOrientation(self, o='auto', force=False):\n \"\"\"\n Sets the orientation of the title bar for this Dock.\n Must be one of 'auto', 'horizontal', or 'vertical'.\n By default ('auto'), the orientation is determined\n based on the aspect ratio of the Dock.\n \"\"\"\n # setOrientation may be called before the container is set in some cases\n # (via resizeEvent), so there's no need to do anything here until called\n # again by containerChanged\n if self.container() is None:\n return\n\n if o == 'auto' and self.autoOrient:\n if self.container().type() == 'tab':\n o = 'horizontal'\n elif self.width() > self.height()*1.5:\n o = 'vertical'\n else:\n o = 'horizontal'\n if force or self.orientation != o:\n self.orientation = o\n self.label.setOrientation(o)\n self.updateStyle()\n\n def updateStyle(self):\n ## updates orientation and appearance of title bar\n if self.labelHidden:\n self.widgetArea.setStyleSheet(self.nStyle)\n elif self.orientation == 'vertical':\n self.label.setOrientation('vertical')\n if self.moveLabel:\n self.topLayout.addWidget(self.label, 1, 0)\n self.widgetArea.setStyleSheet(self.vStyle)\n else:\n self.label.setOrientation('horizontal')\n if self.moveLabel:\n self.topLayout.addWidget(self.label, 0, 1)\n self.widgetArea.setStyleSheet(self.hStyle)\n\n def resizeEvent(self, ev):\n self.setOrientation()\n self.dockdrop.resizeOverlay(self.size())\n\n def name(self):\n return self._name\n\n def addWidget(self, widget, row=None, col=0, rowspan=1, colspan=1):\n \"\"\"\n Add a new widget to the interior of this Dock.\n Each Dock uses a QGridLayout to arrange widgets within.\n \"\"\"\n if row is None:\n row = self.currentRow\n self.currentRow = max(row+1, self.currentRow)\n self.widgets.append(widget)\n self.layout.addWidget(widget, row, col, rowspan, colspan)\n self.dockdrop.raiseOverlay()\n \n def startDrag(self):\n self.drag = QtGui.QDrag(self)\n mime = QtCore.QMimeData()\n self.drag.setMimeData(mime)\n self.widgetArea.setStyleSheet(self.dragStyle)\n self.update()\n action = self.drag.exec() if hasattr(self.drag, 'exec') else self.drag.exec_()\n self.updateStyle()\n\n def float(self):\n self.area.floatDock(self)\n \n def container(self):\n return self._container\n\n def containerChanged(self, c):\n if self._container is not None:\n # ask old container to close itself if it is no longer needed\n self._container.apoptose(propagate=False)\n self._container = c\n if c is None:\n self.area = None\n else:\n self.area = c.area\n if c.type() != 'tab':\n self.moveLabel = True\n self.label.setDim(False)\n else:\n self.moveLabel = False\n \n self.setOrientation(force=True)\n\n def raiseDock(self):\n \"\"\"If this Dock is stacked underneath others, raise it to the top.\"\"\"\n self.container().raiseDock(self)\n\n def close(self):\n \"\"\"Remove this dock from the DockArea it lives inside.\"\"\"\n if self._container is None:\n warnings.warn(f\"Cannot close dock {self} because it is not open.\", RuntimeWarning, stacklevel=2)\n return\n\n self.setParent(None)\n QtWidgets.QLabel.close(self.label)\n self.label.setParent(None)\n self._container.apoptose()\n self._container = None\n self.sigClosed.emit(self)\n\n def __repr__(self):\n return \"<Dock %s %s>\" % (self.name(), self.stretch())\n\n def dragEnterEvent(self, *args):\n self.dockdrop.dragEnterEvent(*args)\n\n def dragMoveEvent(self, *args):\n self.dockdrop.dragMoveEvent(*args)\n\n def dragLeaveEvent(self, *args):\n self.dockdrop.dragLeaveEvent(*args)\n\n def dropEvent(self, *args):\n 
self.dockdrop.dropEvent(*args)\n\n\nclass DockLabel(VerticalLabel):\n\n sigClicked = QtCore.Signal(object, object)\n sigCloseClicked = QtCore.Signal()\n\n def __init__(self, text, closable=False, fontSize=\"12px\"):\n self.dim = False\n self.fixedWidth = False\n self.fontSize = fontSize\n VerticalLabel.__init__(self, text, orientation='horizontal', forceWidth=False)\n self.setAlignment(QtCore.Qt.AlignmentFlag.AlignTop|QtCore.Qt.AlignmentFlag.AlignHCenter)\n self.dock = None\n self.updateStyle()\n self.setAutoFillBackground(False)\n self.mouseMoved = False\n\n self.closeButton = None\n if closable:\n self.closeButton = QtWidgets.QToolButton(self)\n self.closeButton.clicked.connect(self.sigCloseClicked)\n self.closeButton.setIcon(QtWidgets.QApplication.style().standardIcon(QtWidgets.QStyle.StandardPixmap.SP_TitleBarCloseButton))\n\n def updateStyle(self):\n r = '3px'\n if self.dim:\n fg = '#aaa'\n bg = '#44a'\n border = '#339'\n else:\n fg = '#fff'\n bg = '#66c'\n border = '#55B'\n\n if self.orientation == 'vertical':\n self.vStyle = \"\"\"DockLabel {\n background-color : %s;\n color : %s;\n border-top-right-radius: 0px;\n border-top-left-radius: %s;\n border-bottom-right-radius: 0px;\n border-bottom-left-radius: %s;\n border-width: 0px;\n border-right: 2px solid %s;\n padding-top: 3px;\n padding-bottom: 3px;\n font-size: %s;\n }\"\"\" % (bg, fg, r, r, border, self.fontSize)\n self.setStyleSheet(self.vStyle)\n else:\n self.hStyle = \"\"\"DockLabel {\n background-color : %s;\n color : %s;\n border-top-right-radius: %s;\n border-top-left-radius: %s;\n border-bottom-right-radius: 0px;\n border-bottom-left-radius: 0px;\n border-width: 0px;\n border-bottom: 2px solid %s;\n padding-left: 3px;\n padding-right: 3px;\n font-size: %s;\n }\"\"\" % (bg, fg, r, r, border, self.fontSize)\n self.setStyleSheet(self.hStyle)\n\n def setDim(self, d):\n if self.dim != d:\n self.dim = d\n self.updateStyle()\n\n def setOrientation(self, o):\n VerticalLabel.setOrientation(self, o)\n self.updateStyle()\n\n def isClosable(self):\n return self.closeButton is not None\n\n def mousePressEvent(self, ev):\n lpos = ev.position() if hasattr(ev, 'position') else ev.localPos()\n self.pressPos = lpos\n self.mouseMoved = False\n ev.accept()\n\n def mouseMoveEvent(self, ev):\n if not self.mouseMoved:\n lpos = ev.position() if hasattr(ev, 'position') else ev.localPos()\n self.mouseMoved = (lpos - self.pressPos).manhattanLength() > QtWidgets.QApplication.startDragDistance()\n\n if self.mouseMoved and ev.buttons() == QtCore.Qt.MouseButton.LeftButton:\n self.dock.startDrag()\n ev.accept()\n\n def mouseReleaseEvent(self, ev):\n ev.accept()\n if not self.mouseMoved:\n self.sigClicked.emit(self, ev)\n\n def mouseDoubleClickEvent(self, ev):\n if ev.button() == QtCore.Qt.MouseButton.LeftButton:\n self.dock.float()\n\n def resizeEvent (self, ev):\n if self.closeButton:\n if self.orientation == 'vertical':\n size = ev.size().width()\n pos = QtCore.QPoint(0, 0)\n else:\n size = ev.size().height()\n pos = QtCore.QPoint(ev.size().width() - size, 0)\n self.closeButton.setFixedSize(QtCore.QSize(size, size))\n self.closeButton.move(pos)\n super(DockLabel,self).resizeEvent(ev)\n",
"path": "pyqtgraph/dockarea/Dock.py"
}
] | diff --git a/pyqtgraph/dockarea/Dock.py b/pyqtgraph/dockarea/Dock.py
index 99449e1b71..26e43b26ae 100644
--- a/pyqtgraph/dockarea/Dock.py
+++ b/pyqtgraph/dockarea/Dock.py
@@ -210,7 +210,7 @@ def container(self):
def containerChanged(self, c):
if self._container is not None:
# ask old container to close itself if it is no longer needed
- self._container.apoptose()
+ self._container.apoptose(propagate=False)
self._container = c
if c is None:
self.area = None
| Dock restoreState fails silently if a dock object is the only item in a V- or HContainer
### Short description
Calling restoreState() on a DockArea where a dock is the only object inside a container leads to the state not being restored correctly.
I don't know how it happened that a dock could appear as the only item in a container, but one user managed to do that.
I couldn't find out how to trigger it.
Anyway, in that case far too many containers are apoptose'd and the state is not correctly restored.
### Code to reproduce
```python
import json
import sys
import pyqtgraph as pg
from PySide6.QtWidgets import QMainWindow
from pyqtgraph.dockarea import Dock, DockArea
class DockApp(QMainWindow):
    def __init__(self, title):
        super().__init__()
        self.dock_area = DockArea()
        self.setCentralWidget(self.dock_area)
        self.dock_area.addDock(Dock(name="Plot 1", closable=False), 'left')
        self.dock_area.addDock(Dock(name="Plot 2", closable=False), 'left')
        self.dock_area.addDock(Dock(name="Plot 4", closable=False), 'left')
        self.dock_area.addDock(Dock(name="Table 1", closable=False), 'left')
        self.dock_area.addDock(Dock(name="Table 2", closable=False), 'left')
        self.dock_area.addDock(Dock(name="Table 3", closable=False), 'left')
        state = json.loads("""{
"main": [
"vertical",
[
[
"horizontal",
[
[
"vertical",
[
[
"vertical",
[
[
"dock",
"Plot 1",
{}
]
],
{
"sizes": [
314
]
}
],
[
"dock",
"Plot 2",
{}
]
],
{
"sizes": [
314,
313
]
}
],
[
"vertical",
[
[
"dock",
"Table 3",
{}
],
[
"dock",
"Table 2",
{}
],
[
"dock",
"Table 1",
{}
]
],
{
"sizes": [
208,
207,
208
]
}
]
],
{
"sizes": [
784,
783
]
}
],
[
"dock",
"Plot 4",
{}
]
],
{
"sizes": [
631,
210
]
}
],
"float": []
}""")
        self.dock_area.restoreState(state)


if __name__ == '__main__':
    app = pg.Qt.mkQApp("LiveTrace")
    window = DockApp(title='Test')
    window.show()
    sys.exit(app.exec())
```
### Expected behavior
All 6 docks should be restored like this:

### Real behavior
Only 2 docks are visible. All other docks are missing.

Note: No exception is thrown
### Tested environment(s)
* PyQtGraph version: 0.13.3
* Qt Python binding: PySide6 6.6.0 Qt 6.6.0
* Python version: 3.10.11 AMD64
* NumPy version: 1.26.2
* Operating system: Windows 10 22H2
* Installation method: pip
### Additional context
|
interactions-py__interactions.py-89 | [
{
"content": "import logging\nimport typing\nimport discord\nfrom inspect import iscoroutinefunction, getdoc\nfrom discord.ext import commands\nfrom . import http\nfrom . import model\nfrom . import error\nfrom . import context\nfrom .utils import manage_commands\n\n\nclass SlashCommand:\n \"\"\"\n Slash command extension class.\n\n :param client: discord.py Client or Bot instance.\n :type client: Union[discord.Client, discord.ext.commands.Bot]\n :param auto_register: Whether to register commands automatically. Default `False`.\n :type auto_register: bool\n :param override_type: Whether to override checking type of the client and try register event.\n :type override_type: bool\n\n :ivar _discord: Discord client of this client.\n :ivar commands: Dictionary of the registered commands via :func:`.slash` decorator.\n :ivar req: :class:`.http.SlashCommandRequest` of this client.\n :ivar logger: Logger of this client.\n :ivar auto_register: Whether to register commands automatically.\n :ivar auto_delete: Whether to delete commands not found in the project automatically.\n :ivar has_listener: Whether discord client has listener add function.\n \"\"\"\n\n def __init__(self,\n client: typing.Union[discord.Client, commands.Bot],\n auto_register: bool = False,\n auto_delete: bool = False,\n override_type: bool = False):\n self._discord = client\n self.commands = {}\n self.subcommands = {}\n self.logger = logging.getLogger(\"discord_slash\")\n self.req = http.SlashCommandRequest(self.logger, self._discord)\n self.auto_register = auto_register\n self.auto_delete = auto_delete\n\n if self.auto_register and self.auto_delete:\n self._discord.loop.create_task(self.sync_all_commands())\n elif self.auto_register:\n self._discord.loop.create_task(self.register_all_commands())\n elif self.auto_delete:\n self._discord.loop.create_task(self.delete_unused_commands())\n \n if not isinstance(client, commands.Bot) and not isinstance(client,\n commands.AutoShardedBot) and not override_type:\n self.logger.info(\"Detected discord.Client! Overriding on_socket_response.\")\n self._discord.on_socket_response = self.on_socket_response\n self.has_listener = False\n else:\n if not hasattr(self._discord, 'slash'):\n self._discord.slash = self\n else:\n raise error.DuplicateSlashClient(\"You can't have duplicate SlashCommand instances!\")\n \n self._discord.add_listener(self.on_socket_response)\n self.has_listener = True\n default_add_function = self._discord.add_cog\n def override_add_cog(cog: commands.Cog):\n default_add_function(cog)\n self.get_cog_commands(cog)\n self._discord.add_cog = override_add_cog\n default_remove_function = self._discord.remove_cog\n def override_remove_cog(name: str):\n cog = self._discord.get_cog(name)\n if cog is None:\n return\n self.remove_cog_commands(cog)\n default_remove_function(name)\n self._discord.remove_cog = override_remove_cog\n \n \n\n def get_cog_commands(self, cog: commands.Cog):\n \"\"\"\n Gets slash command from :class:`discord.ext.commands.Cog`.\n\n .. note::\n Since version ``1.0.9``, this gets called automatically during cog initialization.\n\n :param cog: Cog that has slash commands.\n :type cog: discord.ext.commands.Cog\n \"\"\"\n if hasattr(cog, '_slash_registered'): # Temporary warning\n return self.logger.warning(\"Calling get_cog_commands is no longer required \"\n \"to add cog slash commands. 
Make sure to remove all calls to this function.\")\n cog._slash_registered = True # Assuming all went well\n func_list = [getattr(cog, x) for x in dir(cog)]\n res = [x for x in func_list if isinstance(x, (model.CogCommandObject, model.CogSubcommandObject))]\n for x in res:\n x.cog = cog\n if isinstance(x, model.CogCommandObject):\n if x.name in self.commands:\n raise error.DuplicateCommand(x.name)\n self.commands[x.name] = x\n else:\n if x.base in self.commands:\n for i in self.commands[x.base].allowed_guild_ids:\n if i not in x.allowed_guild_ids:\n x.allowed_guild_ids.append(i)\n self.commands[x.base].has_subcommands = True\n else:\n _cmd = {\n \"func\": None,\n \"description\": x.base_description,\n \"auto_convert\": {},\n \"guild_ids\": x.allowed_guild_ids,\n \"api_options\": [],\n \"has_subcommands\": True\n }\n self.commands[x.base] = model.CommandObject(x.base, _cmd)\n if x.base not in self.subcommands:\n self.subcommands[x.base] = {}\n if x.subcommand_group:\n if x.subcommand_group not in self.subcommands[x.base]:\n self.subcommands[x.base][x.subcommand_group] = {}\n if x.name in self.subcommands[x.base][x.subcommand_group]:\n raise error.DuplicateCommand(f\"{x.base} {x.subcommand_group} {x.name}\")\n self.subcommands[x.base][x.subcommand_group][x.name] = x\n else:\n if x.name in self.subcommands[x.base]:\n raise error.DuplicateCommand(f\"{x.base} {x.name}\")\n self.subcommands[x.base][x.name] = x\n\n def remove_cog_commands(self, cog):\n \"\"\"\n Removes slash command from :class:`discord.ext.commands.Cog`.\n\n .. note::\n Since version ``1.0.9``, this gets called automatically during cog de-initialization.\n\n :param cog: Cog that has slash commands.\n :type cog: discord.ext.commands.Cog\n \"\"\"\n if hasattr(cog, '_slash_registered'):\n del cog._slash_registered\n func_list = [getattr(cog, x) for x in dir(cog)]\n res = [x for x in func_list if\n isinstance(x, (model.CogCommandObject, model.CogSubcommandObject))]\n for x in res:\n if isinstance(x, model.CogCommandObject):\n if x.name not in self.commands:\n continue # Just in case it is removed due to subcommand.\n if x.name in self.subcommands:\n self.commands[x.name].func = None\n continue # Let's remove completely when every subcommand is removed.\n del self.commands[x.name]\n else:\n if x.base not in self.subcommands:\n continue # Just in case...\n if x.subcommand_group:\n del self.subcommands[x.base][x.subcommand_group][x.name]\n if not self.subcommands[x.base][x.subcommand_group]:\n del self.subcommands[x.base][x.subcommand_group]\n else:\n del self.subcommands[x.base][x.name]\n if not self.subcommands[x.base]:\n del self.subcommands[x.base]\n if x.base in self.commands:\n if self.commands[x.base].func:\n self.commands[x.base].has_subcommands = False\n else:\n del self.commands[x.base]\n\n async def to_dict(self):\n \"\"\"\n Converts all commands currently registered to :class:`SlashCommand` to a dictionary.\n Returns a dictionary in the format:\n\n .. 
code-block:: python\n\n {\n \"global\" : [], # list of global commands\n \"guild\" : {\n 0000: [] # list of commands in the guild 0000\n }\n }\n\n Commands are in the format specified by discord `here <https://discord.com/developers/docs/interactions/slash-commands#applicationcommand>`_\n \"\"\"\n await self._discord.wait_until_ready() # In case commands are still not registered to SlashCommand.\n commands = {\n \"global\": [],\n \"guild\": {}\n }\n for x in self.commands:\n selected = self.commands[x]\n if selected.has_subcommands and selected.func:\n # Registering both subcommand and command with same base name / name\n # will result in only subcommand being registered,\n # so we will warn this at registering subcommands.\n self.logger.warning(f\"Detected command name with same subcommand base name! \"\n f\"This command will only have subcommand: {x}\")\n \n options = []\n if selected.has_subcommands:\n tgt = self.subcommands[x]\n for y in tgt:\n sub = tgt[y]\n if isinstance(sub, model.SubcommandObject):\n _dict = {\n \"name\": sub.name,\n \"description\": sub.description or \"No Description.\",\n \"type\": model.SlashCommandOptionType.SUB_COMMAND,\n \"options\": sub.options or []\n }\n options.append(_dict)\n else:\n base_dict = {\n \"name\": y,\n \"description\": \"No Description.\",\n \"type\": model.SlashCommandOptionType.SUB_COMMAND_GROUP,\n \"options\": []\n }\n for z in sub:\n sub_sub = sub[z]\n _dict = {\n \"name\": sub_sub.name,\n \"description\": sub_sub.description or \"No Description.\",\n \"type\": model.SlashCommandOptionType.SUB_COMMAND,\n \"options\": sub_sub.options or []\n }\n base_dict[\"options\"].append(_dict)\n if sub_sub.subcommand_group_description:\n base_dict[\"description\"] = sub_sub.subcommand_group_description\n options.append(base_dict)\n\n command_dict = {\n \"name\": x,\n \"description\": selected.description or \"No Description.\",\n \"options\": selected.options if not options else options\n }\n if selected.allowed_guild_ids:\n for y in selected.allowed_guild_ids:\n try:\n commands[\"guild\"][y].append(command_dict)\n except KeyError:\n commands[\"guild\"][y] = [command_dict]\n else:\n commands[\"global\"].append(command_dict)\n\n return commands\n\n async def sync_all_commands(self, delete_from_unused_guilds = True):\n \"\"\"\n Matches commands registered on Discord to commands registered here.\n Deletes any commands on Discord but not here, and registers any not on Discord.\n This is done with a `put` request.\n If ``auto_register`` and ``auto_delete`` are ``True`` then this will be automatically called.\n\n :param delete_from_unused_guilds: If the bot should make a request to set no commands for guilds that haven't got any commands regestered in :class:``SlashCommand``\n \"\"\"\n commands = await self.to_dict()\n self.logger.info(\"Syncing commands...\")\n all_bot_guilds = [guild.id for guild in self._discord.guilds]\n # This is an extremly bad way to do this, because slash cmds can be in guilds the bot isn't in\n # But it's the only way until discord makes an endpoint to request all the guild with cmds registered.\n\n await self.req.put_slash_commands(slash_commands = commands[\"global\"], guild_id = None)\n \n for guild in commands[\"guild\"]:\n await self.req.put_slash_commands(slash_commands = commands[\"guild\"][guild], guild_id = guild)\n all_bot_guilds.remove(guild)\n if delete_from_unused_guilds:\n for guild in all_bot_guilds:\n await self.req.put_slash_commands(slash_commands=[], guild_id = guild)\n \n self.logger.info(\"Completed syncing 
all commands!\")\n\n async def register_all_commands(self):\n \"\"\"\n Registers all slash commands to Discord API.\\n\n If ``auto_register`` is ``True`` and ``auto_delete`` is ``False``, then this will be automatically called.\n \"\"\"\n self.logger.info(\"Registering commands...\")\n commands = await self.to_dict()\n for command in commands[\"global\"]:\n name = command.pop('name')\n self.logger.debug(f\"Registering global command {name}\")\n await self.req.add_slash_command(guild_id = None, cmd_name = name, **command)\n \n for guild in commands[\"guild\"]:\n guild_cmds = commands[\"guild\"][guild]\n for command in guild_cmds:\n name = command.pop('name')\n self.logger.debug(f\"Registering guild command {name} in guild: {guild}\")\n await self.req.add_slash_command(guild_id = guild, cmd_name = name, **command)\n self.logger.info(\"Completed registering all commands!\")\n\n async def delete_unused_commands(self):\n \"\"\"\n Unregisters all slash commands which are not used by the project to Discord API.\\n\n This might take some time because for every guild the bot is on an API call is made.\\n\n If ``auto_delete`` is ``True`` and ``auto_register`` is ``False``, then this will be automatically called.\n \"\"\"\n await self._discord.wait_until_ready()\n self.logger.info(\"Deleting unused commands...\")\n registered_commands = {}\n global_commands = await self.req.get_all_commands(None)\n\n for cmd in global_commands:\n registered_commands[cmd[\"name\"]] = {\"id\": cmd[\"id\"], \"guild_id\": None}\n\n for guild in self._discord.guilds:\n # Since we can only get commands per guild we need to loop through every one\n try:\n guild_commands = await self.req.get_all_commands(guild.id)\n except discord.Forbidden:\n # In case a guild has not granted permissions to access commands\n continue\n\n for cmd in guild_commands:\n registered_commands[cmd[\"name\"]] = {\"id\": cmd[\"id\"], \"guild_id\": guild.id}\n\n for x in registered_commands:\n if x not in self.commands:\n # Delete command if not found locally\n selected = registered_commands[x]\n await self.req.remove_slash_command(selected[\"guild_id\"], selected[\"id\"])\n\n self.logger.info(\"Completed deleting unused commands!\")\n\n def add_slash_command(self,\n cmd,\n name: str = None,\n description: str = None,\n auto_convert: dict = None,\n guild_ids: typing.List[int] = None,\n options: list = None,\n has_subcommands: bool = False):\n \"\"\"\n Registers slash command to SlashCommand.\n\n :param cmd: Command Coroutine.\n :type cmd: Coroutine\n :param name: Name of the slash command. Default name of the coroutine.\n :type name: str\n :param description: Description of the slash command. Defaults to command docstring or ``None``.\n :type description: str\n :param auto_convert: Dictionary of how to convert option values. Default ``None``.\n :type auto_convert: dict\n :param guild_ids: List of Guild ID of where the command will be used. Default ``None``, which will be global command.\n :type guild_ids: List[int]\n :param options: Options of the slash command. This will affect ``auto_convert`` and command data at Discord API. Default ``None``.\n :type options: list\n :param has_subcommands: Whether it has subcommand. 
Default ``False``.\n :type has_subcommands: bool\n \"\"\"\n name = name or cmd.__name__\n name = name.lower()\n if name in self.commands:\n tgt = self.commands[name]\n if not tgt.has_subcommands:\n raise error.DuplicateCommand(name)\n has_subcommands = tgt.has_subcommands\n for x in tgt.allowed_guild_ids:\n if x not in guild_ids:\n guild_ids.append(x)\n\n description = description or getdoc(cmd)\n\n if options is None:\n options = manage_commands.generate_options(cmd, description)\n\n if options:\n auto_convert = manage_commands.generate_auto_convert(options)\n\n _cmd = {\n \"func\": cmd,\n \"description\": description,\n \"auto_convert\": auto_convert,\n \"guild_ids\": guild_ids,\n \"api_options\": options,\n \"has_subcommands\": has_subcommands\n }\n self.commands[name] = model.CommandObject(name, _cmd)\n self.logger.debug(f\"Added command `{name}`\")\n\n def add_subcommand(self,\n cmd,\n base,\n subcommand_group=None,\n name=None,\n description: str = None,\n base_description: str = None,\n subcommand_group_description: str = None,\n auto_convert: dict = None,\n guild_ids: typing.List[int] = None,\n options: list = None):\n \"\"\"\n Registers subcommand to SlashCommand.\n\n :param cmd: Subcommand Coroutine.\n :type cmd: Coroutine\n :param base: Name of the base command.\n :type base: str\n :param subcommand_group: Name of the subcommand group, if any. Default ``None`` which represents there is no sub group.\n :type subcommand_group: str\n :param name: Name of the subcommand. Default name of the coroutine.\n :type name: str\n :param description: Description of the subcommand. Defaults to command docstring or ``None``.\n :type description: str\n :param base_description: Description of the base command. Default ``None``.\n :type base_description: str\n :param subcommand_group_description: Description of the subcommand_group. Default ``None``.\n :type subcommand_group_description: str\n :param auto_convert: Dictionary of how to convert option values. Default ``None``.\n :type auto_convert: dict\n :param guild_ids: List of guild ID of where the command will be used. Default ``None``, which will be global command.\n :type guild_ids: List[int]\n :param options: Options of the subcommand. This will affect ``auto_convert`` and command data at Discord API. 
Default ``None``.\n :type options: list\n \"\"\"\n base = base.lower()\n subcommand_group = subcommand_group.lower() if subcommand_group else subcommand_group\n name = name or cmd.__name__\n name = name.lower()\n description = description or getdoc(cmd)\n\n if name in self.commands:\n tgt = self.commands[name]\n for x in tgt.allowed_guild_ids:\n if x not in guild_ids:\n guild_ids.append(x)\n\n if options is None:\n options = manage_commands.generate_options(cmd, description)\n\n if options:\n auto_convert = manage_commands.generate_auto_convert(options)\n\n _cmd = {\n \"func\": None,\n \"description\": base_description,\n \"auto_convert\": {},\n \"guild_ids\": guild_ids,\n \"api_options\": [],\n \"has_subcommands\": True\n }\n _sub = {\n \"func\": cmd,\n \"name\": name,\n \"description\": description,\n \"base_desc\": base_description,\n \"sub_group_desc\": subcommand_group_description,\n \"auto_convert\": auto_convert,\n \"guild_ids\": guild_ids,\n \"api_options\": options\n }\n if base not in self.commands:\n self.commands[base] = model.CommandObject(base, _cmd)\n else:\n self.commands[base].has_subcommands = True\n self.commands[base].allowed_guild_ids = guild_ids\n if self.commands[base].description:\n _cmd[\"description\"] = self.commands[base].description\n if base not in self.subcommands:\n self.subcommands[base] = {}\n if subcommand_group:\n if subcommand_group not in self.subcommands[base]:\n self.subcommands[base][subcommand_group] = {}\n if name in self.subcommands[base][subcommand_group]:\n raise error.DuplicateCommand(f\"{base} {subcommand_group} {name}\")\n self.subcommands[base][subcommand_group][name] = model.SubcommandObject(_sub, base, name, subcommand_group)\n else:\n if name in self.subcommands[base]:\n raise error.DuplicateCommand(f\"{base} {name}\")\n self.subcommands[base][name] = model.SubcommandObject(_sub, base, name)\n self.logger.debug(f\"Added subcommand `{base} {subcommand_group or ''} {name or cmd.__name__}`\")\n\n def slash(self,\n *,\n name: str = None,\n description: str = None,\n auto_convert: dict = None,\n guild_id: int = None,\n guild_ids: typing.List[int] = None,\n options: typing.List[dict] = None):\n \"\"\"\n Decorator that registers coroutine as a slash command.\\n\n All decorator args must be passed as keyword-only args.\\n\n 1 arg for command coroutine is required for ctx(:class:`.model.SlashContext`),\n and if your slash command has some args, then those args are also required.\\n\n All args must be passed as keyword-args.\n\n .. note::\n Role, User, and Channel types are passed as id if you don't set ``auto_convert``, since API doesn't give type of the option for now.\\n\n Also, if ``options`` is passed, then ``auto_convert`` will be automatically created or overrided.\n\n .. warning::\n Unlike discord.py's command, ``*args``, keyword-only args, converters, etc. are NOT supported.\n\n Example:\n\n .. code-block:: python\n\n @slash.slash(name=\"ping\")\n async def _slash(ctx): # Normal usage.\n await ctx.send(content=f\"Pong! (`{round(bot.latency*1000)}`ms)\")\n\n\n @slash.slash(name=\"pick\")\n async def _pick(ctx, choice1, choice2): # Command with 1 or more args.\n await ctx.send(content=str(random.choice([choice1, choice2])))\n\n Example of formatting ``auto_convert``:\n\n .. 
code-block:: python\n\n {\"option_role\": \"role\", # For key put name of the option and for value put type of the option.\n \"option_user\": SlashCommandOptionType.USER, # Also can use an enumeration member for the type\n \"option_user_two\": 6, # or number\n \"option_channel\": \"CHANNEL\"} # or upper case string.\n\n :param name: Name of the slash command. Default name of the coroutine.\n :type name: str\n :param description: Description of the slash command. Default ``None``.\n :type description: str\n :param auto_convert: Dictionary of how to convert option values. Default ``None``.\n :type auto_convert: dict\n :param guild_id: Deprecated. Use ``guild_ids`` instead.\n :type guild_id: int\n :param guild_ids: List of Guild ID of where the command will be used. Default ``None``, which will be global command.\n :type guild_ids: List[int]\n :param options: Options of the slash command. This will affect ``auto_convert`` and command data at Discord API. Default ``None``.\n :type options: List[dict]\n \"\"\"\n if guild_id:\n self.logger.warning(\"`guild_id` is deprecated! `Use guild_ids` instead.\")\n guild_ids = [guild_id]\n\n def wrapper(cmd):\n self.add_slash_command(cmd, name, description, auto_convert, guild_ids, options)\n return cmd\n\n return wrapper\n\n def subcommand(self,\n *,\n base,\n subcommand_group=None,\n name=None,\n description: str = None,\n base_description: str = None,\n base_desc: str = None,\n subcommand_group_description: str = None,\n sub_group_desc: str = None,\n auto_convert: dict = None,\n guild_ids: typing.List[int] = None,\n options: typing.List[dict] = None):\n \"\"\"\n Decorator that registers subcommand.\\n\n Unlike discord.py, you don't need base command.\\n\n All args must be passed as keyword-args.\n\n Example:\n\n .. code-block:: python\n\n # /group say <str>\n @slash.subcommand(base=\"group\", name=\"say\")\n async def _group_say(ctx, _str):\n await ctx.send(content=_str)\n\n # /group kick user <user>\n @slash.subcommand(base=\"group\",\n subcommand_group=\"kick\",\n name=\"user\",\n auto_convert={\"user\": \"user\"})\n async def _group_kick_user(ctx, user):\n ...\n\n :param base: Name of the base command.\n :type base: str\n :param subcommand_group: Name of the subcommand group, if any. Default ``None`` which represents there is no sub group.\n :type subcommand_group: str\n :param name: Name of the subcommand. Default name of the coroutine.\n :type name: str\n :param description: Description of the subcommand. Default ``None``.\n :type description: str\n :param base_description: Description of the base command. Default ``None``.\n :type base_description: str\n :param base_desc: Alias of ``base_description``.\n :param subcommand_group_description: Description of the subcommand_group. Default ``None``.\n :type subcommand_group_description: str\n :param sub_group_desc: Alias of ``subcommand_group_description``.\n :param auto_convert: Dictionary of how to convert option values. Default ``None``.\n :type auto_convert: dict\n :param guild_ids: List of guild ID of where the command will be used. Default ``None``, which will be global command.\n :type guild_ids: List[int]\n :param options: Options of the subcommand. This will affect ``auto_convert`` and command data at Discord API. 
Default ``None``.\n :type options: List[dict]\n \"\"\"\n base_description = base_description or base_desc\n subcommand_group_description = subcommand_group_description or sub_group_desc\n\n def wrapper(cmd):\n self.add_subcommand(cmd, base, subcommand_group, name, description, base_description, subcommand_group_description, auto_convert, guild_ids, options)\n return cmd\n\n return wrapper\n\n async def process_options(self, guild: discord.Guild, options: list, auto_convert: dict) -> list:\n \"\"\"\n Processes Role, User, and Channel option types to discord.py's models.\n\n :param guild: Guild of the command message.\n :type guild: discord.Guild\n :param options: Dict of options.\n :type options: list\n :param auto_convert: Dictionary of how to convert option values.\n :type auto_convert: dict\n :return: list\n \"\"\"\n if not guild:\n self.logger.info(\"This command invoke is missing guild. Skipping option process.\")\n return [x[\"value\"] for x in options]\n\n if not isinstance(guild, discord.Guild):\n return [x[\"value\"] for x in options]\n\n if not auto_convert:\n return [x[\"value\"] for x in options]\n\n converters = [\n [guild.get_member, guild.fetch_member],\n guild.get_channel,\n guild.get_role\n ]\n\n types = {\n \"user\": 0,\n \"USER\": 0,\n model.SlashCommandOptionType.USER: 0,\n \"6\": 0,\n 6: 0,\n \"channel\": 1,\n \"CHANNEL\": 1,\n model.SlashCommandOptionType.CHANNEL: 1,\n \"7\": 1,\n 7: 1,\n \"role\": 2,\n \"ROLE\": 2,\n model.SlashCommandOptionType.ROLE: 2,\n 8: 2,\n \"8\": 2\n }\n\n to_return = []\n\n for x in options:\n selected = x\n if selected[\"name\"] in auto_convert:\n if auto_convert[selected[\"name\"]] not in types:\n to_return.append(selected[\"value\"])\n continue\n loaded_converter = converters[types[auto_convert[selected[\"name\"]]]]\n if isinstance(loaded_converter, list):\n cache_first = loaded_converter[0](int(selected[\"value\"]))\n if cache_first:\n to_return.append(cache_first)\n continue\n loaded_converter = loaded_converter[1]\n try:\n to_return.append(await loaded_converter(int(selected[\"value\"]))) \\\n if iscoroutinefunction(loaded_converter) else \\\n to_return.append(loaded_converter(int(selected[\"value\"])))\n except (discord.Forbidden, discord.HTTPException):\n self.logger.warning(\"Failed fetching user! Passing ID instead.\")\n to_return.append(int(selected[\"value\"]))\n return to_return\n\n async def on_socket_response(self, msg):\n \"\"\"\n This event listener is automatically registered at initialization of this class.\n\n .. 
warning::\n DO NOT MANUALLY REGISTER, OVERRIDE, OR WHATEVER ACTION TO THIS COROUTINE UNLESS YOU KNOW WHAT YOU ARE DOING.\n\n :param msg: Gateway message.\n \"\"\"\n if msg[\"t\"] != \"INTERACTION_CREATE\":\n return\n\n to_use = msg[\"d\"]\n\n if to_use[\"data\"][\"name\"] in self.commands:\n\n ctx = context.SlashContext(self.req, to_use, self._discord, self.logger)\n cmd_name = to_use[\"data\"][\"name\"]\n\n if cmd_name not in self.commands and cmd_name in self.subcommands:\n return await self.handle_subcommand(ctx, to_use)\n\n selected_cmd = self.commands[to_use[\"data\"][\"name\"]]\n\n if selected_cmd.allowed_guild_ids:\n guild_id = ctx.guild.id if isinstance(ctx.guild, discord.Guild) else ctx.guild\n\n if guild_id not in selected_cmd.allowed_guild_ids:\n return\n\n if selected_cmd.has_subcommands and not selected_cmd.func:\n return await self.handle_subcommand(ctx, to_use)\n\n if \"options\" in to_use[\"data\"]:\n for x in to_use[\"data\"][\"options\"]:\n if \"value\" not in x:\n return await self.handle_subcommand(ctx, to_use)\n\n args = await self.process_options(ctx.guild, to_use[\"data\"][\"options\"], selected_cmd.auto_convert) \\\n if \"options\" in to_use[\"data\"] else []\n\n self._discord.dispatch(\"slash_command\", ctx)\n\n try:\n await selected_cmd.invoke(ctx, *args)\n except Exception as ex:\n await self.on_slash_command_error(ctx, ex)\n\n async def handle_subcommand(self, ctx: context.SlashContext, data: dict):\n \"\"\"\n Coroutine for handling subcommand.\n\n .. warning::\n Do not manually call this.\n\n :param ctx: :class:`.model.SlashContext` instance.\n :param data: Gateway message.\n \"\"\"\n if data[\"data\"][\"name\"] not in self.subcommands:\n return\n base = self.subcommands[data[\"data\"][\"name\"]]\n sub = data[\"data\"][\"options\"][0]\n sub_name = sub[\"name\"]\n if sub_name not in base:\n return\n ctx.subcommand = sub_name\n sub_opts = sub[\"options\"] if \"options\" in sub else []\n for x in sub_opts:\n if \"options\" in x or \"value\" not in x:\n sub_group = x[\"name\"]\n if sub_group not in base[sub_name]:\n return\n ctx.subcommand_group = sub_group\n selected = base[sub_name][sub_group]\n args = await self.process_options(ctx.guild, x[\"options\"], selected.auto_convert) \\\n if \"options\" in x else []\n self._discord.dispatch(\"slash_command\", ctx)\n try:\n await selected.invoke(ctx, *args)\n except Exception as ex:\n await self.on_slash_command_error(ctx, ex)\n return\n selected = base[sub_name]\n args = await self.process_options(ctx.guild, sub_opts, selected.auto_convert) \\\n if \"options\" in sub else []\n self._discord.dispatch(\"slash_command\", ctx)\n try:\n await selected.invoke(ctx, *args)\n except Exception as ex:\n await self.on_slash_command_error(ctx, ex)\n\n async def on_slash_command_error(self, ctx, ex):\n \"\"\"\n Handles Exception occurred from invoking command.\n\n Example of adding event:\n\n .. code-block:: python\n\n @client.event\n async def on_slash_command_error(ctx, ex):\n ...\n\n Example of adding listener:\n\n .. 
code-block:: python\n\n @bot.listen()\n async def on_slash_command_error(ctx, ex):\n ...\n\n :param ctx: Context of the command.\n :type ctx: :class:`.model.SlashContext`\n :param ex: Exception from the command invoke.\n :type ex: Exception\n :return:\n \"\"\"\n if self.has_listener:\n if self._discord.extra_events.get('on_slash_command_error'):\n self._discord.dispatch(\"slash_command_error\", ctx, ex)\n return\n if hasattr(self._discord, \"on_slash_command_error\"):\n self._discord.dispatch(\"slash_command_error\", ctx, ex)\n return\n # Prints exception if not overrided or has no listener for error.\n self.logger.exception(f\"An exception has occurred while executing command `{ctx.name}`:\")\n",
"path": "discord_slash/client.py"
}
] | [
{
"content": "import logging\nimport typing\nimport discord\nfrom inspect import iscoroutinefunction, getdoc\nfrom discord.ext import commands\nfrom . import http\nfrom . import model\nfrom . import error\nfrom . import context\nfrom .utils import manage_commands\n\n\nclass SlashCommand:\n \"\"\"\n Slash command extension class.\n\n :param client: discord.py Client or Bot instance.\n :type client: Union[discord.Client, discord.ext.commands.Bot]\n :param auto_register: Whether to register commands automatically. Default `False`.\n :type auto_register: bool\n :param override_type: Whether to override checking type of the client and try register event.\n :type override_type: bool\n\n :ivar _discord: Discord client of this client.\n :ivar commands: Dictionary of the registered commands via :func:`.slash` decorator.\n :ivar req: :class:`.http.SlashCommandRequest` of this client.\n :ivar logger: Logger of this client.\n :ivar auto_register: Whether to register commands automatically.\n :ivar auto_delete: Whether to delete commands not found in the project automatically.\n :ivar has_listener: Whether discord client has listener add function.\n \"\"\"\n\n def __init__(self,\n client: typing.Union[discord.Client, commands.Bot],\n auto_register: bool = False,\n auto_delete: bool = False,\n override_type: bool = False):\n self._discord = client\n self.commands = {}\n self.subcommands = {}\n self.logger = logging.getLogger(\"discord_slash\")\n self.req = http.SlashCommandRequest(self.logger, self._discord)\n self.auto_register = auto_register\n self.auto_delete = auto_delete\n\n if self.auto_register and self.auto_delete:\n self._discord.loop.create_task(self.sync_all_commands())\n elif self.auto_register:\n self._discord.loop.create_task(self.register_all_commands())\n elif self.auto_delete:\n self._discord.loop.create_task(self.delete_unused_commands())\n \n if not isinstance(client, commands.Bot) and not isinstance(client,\n commands.AutoShardedBot) and not override_type:\n self.logger.info(\"Detected discord.Client! Overriding on_socket_response.\")\n self._discord.on_socket_response = self.on_socket_response\n self.has_listener = False\n else:\n if not hasattr(self._discord, 'slash'):\n self._discord.slash = self\n else:\n raise error.DuplicateSlashClient(\"You can't have duplicate SlashCommand instances!\")\n \n self._discord.add_listener(self.on_socket_response)\n self.has_listener = True\n default_add_function = self._discord.add_cog\n def override_add_cog(cog: commands.Cog):\n default_add_function(cog)\n self.get_cog_commands(cog)\n self._discord.add_cog = override_add_cog\n default_remove_function = self._discord.remove_cog\n def override_remove_cog(name: str):\n cog = self._discord.get_cog(name)\n if cog is None:\n return\n self.remove_cog_commands(cog)\n default_remove_function(name)\n self._discord.remove_cog = override_remove_cog\n \n \n\n def get_cog_commands(self, cog: commands.Cog):\n \"\"\"\n Gets slash command from :class:`discord.ext.commands.Cog`.\n\n .. note::\n Since version ``1.0.9``, this gets called automatically during cog initialization.\n\n :param cog: Cog that has slash commands.\n :type cog: discord.ext.commands.Cog\n \"\"\"\n if hasattr(cog, '_slash_registered'): # Temporary warning\n return self.logger.warning(\"Calling get_cog_commands is no longer required \"\n \"to add cog slash commands. 
Make sure to remove all calls to this function.\")\n cog._slash_registered = True # Assuming all went well\n func_list = [getattr(cog, x) for x in dir(cog)]\n res = [x for x in func_list if isinstance(x, (model.CogCommandObject, model.CogSubcommandObject))]\n for x in res:\n x.cog = cog\n if isinstance(x, model.CogCommandObject):\n if x.name in self.commands:\n raise error.DuplicateCommand(x.name)\n self.commands[x.name] = x\n else:\n if x.base in self.commands:\n for i in self.commands[x.base].allowed_guild_ids:\n if i not in x.allowed_guild_ids:\n x.allowed_guild_ids.append(i)\n self.commands[x.base].has_subcommands = True\n else:\n _cmd = {\n \"func\": None,\n \"description\": x.base_description,\n \"auto_convert\": {},\n \"guild_ids\": x.allowed_guild_ids,\n \"api_options\": [],\n \"has_subcommands\": True\n }\n self.commands[x.base] = model.CommandObject(x.base, _cmd)\n if x.base not in self.subcommands:\n self.subcommands[x.base] = {}\n if x.subcommand_group:\n if x.subcommand_group not in self.subcommands[x.base]:\n self.subcommands[x.base][x.subcommand_group] = {}\n if x.name in self.subcommands[x.base][x.subcommand_group]:\n raise error.DuplicateCommand(f\"{x.base} {x.subcommand_group} {x.name}\")\n self.subcommands[x.base][x.subcommand_group][x.name] = x\n else:\n if x.name in self.subcommands[x.base]:\n raise error.DuplicateCommand(f\"{x.base} {x.name}\")\n self.subcommands[x.base][x.name] = x\n\n def remove_cog_commands(self, cog):\n \"\"\"\n Removes slash command from :class:`discord.ext.commands.Cog`.\n\n .. note::\n Since version ``1.0.9``, this gets called automatically during cog de-initialization.\n\n :param cog: Cog that has slash commands.\n :type cog: discord.ext.commands.Cog\n \"\"\"\n if hasattr(cog, '_slash_registered'):\n del cog._slash_registered\n func_list = [getattr(cog, x) for x in dir(cog)]\n res = [x for x in func_list if\n isinstance(x, (model.CogCommandObject, model.CogSubcommandObject))]\n for x in res:\n if isinstance(x, model.CogCommandObject):\n if x.name not in self.commands:\n continue # Just in case it is removed due to subcommand.\n if x.name in self.subcommands:\n self.commands[x.name].func = None\n continue # Let's remove completely when every subcommand is removed.\n del self.commands[x.name]\n else:\n if x.base not in self.subcommands:\n continue # Just in case...\n if x.subcommand_group:\n del self.subcommands[x.base][x.subcommand_group][x.name]\n if not self.subcommands[x.base][x.subcommand_group]:\n del self.subcommands[x.base][x.subcommand_group]\n else:\n del self.subcommands[x.base][x.name]\n if not self.subcommands[x.base]:\n del self.subcommands[x.base]\n if x.base in self.commands:\n if self.commands[x.base].func:\n self.commands[x.base].has_subcommands = False\n else:\n del self.commands[x.base]\n\n async def to_dict(self):\n \"\"\"\n Converts all commands currently registered to :class:`SlashCommand` to a dictionary.\n Returns a dictionary in the format:\n\n .. 
code-block:: python\n\n {\n \"global\" : [], # list of global commands\n \"guild\" : {\n 0000: [] # list of commands in the guild 0000\n }\n }\n\n Commands are in the format specified by discord `here <https://discord.com/developers/docs/interactions/slash-commands#applicationcommand>`_\n \"\"\"\n await self._discord.wait_until_ready() # In case commands are still not registered to SlashCommand.\n commands = {\n \"global\": [],\n \"guild\": {}\n }\n for x in self.commands:\n selected = self.commands[x]\n if selected.has_subcommands and selected.func:\n # Registering both subcommand and command with same base name / name\n # will result in only subcommand being registered,\n # so we will warn this at registering subcommands.\n self.logger.warning(f\"Detected command name with same subcommand base name! \"\n f\"This command will only have subcommand: {x}\")\n \n options = []\n if selected.has_subcommands:\n tgt = self.subcommands[x]\n for y in tgt:\n sub = tgt[y]\n if isinstance(sub, model.SubcommandObject):\n _dict = {\n \"name\": sub.name,\n \"description\": sub.description or \"No Description.\",\n \"type\": model.SlashCommandOptionType.SUB_COMMAND,\n \"options\": sub.options or []\n }\n options.append(_dict)\n else:\n base_dict = {\n \"name\": y,\n \"description\": \"No Description.\",\n \"type\": model.SlashCommandOptionType.SUB_COMMAND_GROUP,\n \"options\": []\n }\n for z in sub:\n sub_sub = sub[z]\n _dict = {\n \"name\": sub_sub.name,\n \"description\": sub_sub.description or \"No Description.\",\n \"type\": model.SlashCommandOptionType.SUB_COMMAND,\n \"options\": sub_sub.options or []\n }\n base_dict[\"options\"].append(_dict)\n if sub_sub.subcommand_group_description:\n base_dict[\"description\"] = sub_sub.subcommand_group_description\n options.append(base_dict)\n\n command_dict = {\n \"name\": x,\n \"description\": selected.description or \"No Description.\",\n \"options\": selected.options if not options else options\n }\n if selected.allowed_guild_ids:\n for y in selected.allowed_guild_ids:\n try:\n commands[\"guild\"][y].append(command_dict)\n except KeyError:\n commands[\"guild\"][y] = [command_dict]\n else:\n commands[\"global\"].append(command_dict)\n\n return commands\n\n async def sync_all_commands(self, delete_from_unused_guilds = True):\n \"\"\"\n Matches commands registered on Discord to commands registered here.\n Deletes any commands on Discord but not here, and registers any not on Discord.\n This is done with a `put` request.\n If ``auto_register`` and ``auto_delete`` are ``True`` then this will be automatically called.\n\n :param delete_from_unused_guilds: If the bot should make a request to set no commands for guilds that haven't got any commands regestered in :class:``SlashCommand``\n \"\"\"\n commands = await self.to_dict()\n self.logger.info(\"Syncing commands...\")\n all_bot_guilds = [guild.id for guild in self._discord.guilds]\n # This is an extremly bad way to do this, because slash cmds can be in guilds the bot isn't in\n # But it's the only way until discord makes an endpoint to request all the guild with cmds registered.\n\n await self.req.put_slash_commands(slash_commands = commands[\"global\"], guild_id = None)\n \n for guild in commands[\"guild\"]:\n await self.req.put_slash_commands(slash_commands = commands[\"guild\"][guild], guild_id = guild)\n all_bot_guilds.remove(guild)\n if delete_from_unused_guilds:\n for guild in all_bot_guilds:\n await self.req.put_slash_commands(slash_commands=[], guild_id = guild)\n \n self.logger.info(\"Completed syncing 
all commands!\")\n\n async def register_all_commands(self):\n \"\"\"\n Registers all slash commands to Discord API.\\n\n If ``auto_register`` is ``True`` and ``auto_delete`` is ``False``, then this will be automatically called.\n \"\"\"\n self.logger.info(\"Registering commands...\")\n commands = await self.to_dict()\n for command in commands[\"global\"]:\n name = command.pop('name')\n self.logger.debug(f\"Registering global command {name}\")\n await self.req.add_slash_command(guild_id = None, cmd_name = name, **command)\n \n for guild in commands[\"guild\"]:\n guild_cmds = commands[\"guild\"][guild]\n for command in guild_cmds:\n name = command.pop('name')\n self.logger.debug(f\"Registering guild command {name} in guild: {guild}\")\n await self.req.add_slash_command(guild_id = guild, cmd_name = name, **command)\n self.logger.info(\"Completed registering all commands!\")\n\n async def delete_unused_commands(self):\n \"\"\"\n Unregisters all slash commands which are not used by the project to Discord API.\\n\n This might take some time because for every guild the bot is on an API call is made.\\n\n If ``auto_delete`` is ``True`` and ``auto_register`` is ``False``, then this will be automatically called.\n \"\"\"\n await self._discord.wait_until_ready()\n self.logger.info(\"Deleting unused commands...\")\n registered_commands = {}\n global_commands = await self.req.get_all_commands(None)\n\n for cmd in global_commands:\n registered_commands[cmd[\"name\"]] = {\"id\": cmd[\"id\"], \"guild_id\": None}\n\n for guild in self._discord.guilds:\n # Since we can only get commands per guild we need to loop through every one\n try:\n guild_commands = await self.req.get_all_commands(guild.id)\n except discord.Forbidden:\n # In case a guild has not granted permissions to access commands\n continue\n\n for cmd in guild_commands:\n registered_commands[cmd[\"name\"]] = {\"id\": cmd[\"id\"], \"guild_id\": guild.id}\n\n for x in registered_commands:\n if x not in self.commands:\n # Delete command if not found locally\n selected = registered_commands[x]\n await self.req.remove_slash_command(selected[\"guild_id\"], selected[\"id\"])\n\n self.logger.info(\"Completed deleting unused commands!\")\n\n def add_slash_command(self,\n cmd,\n name: str = None,\n description: str = None,\n auto_convert: dict = None,\n guild_ids: typing.List[int] = None,\n options: list = None,\n has_subcommands: bool = False):\n \"\"\"\n Registers slash command to SlashCommand.\n\n :param cmd: Command Coroutine.\n :type cmd: Coroutine\n :param name: Name of the slash command. Default name of the coroutine.\n :type name: str\n :param description: Description of the slash command. Defaults to command docstring or ``None``.\n :type description: str\n :param auto_convert: Dictionary of how to convert option values. Default ``None``.\n :type auto_convert: dict\n :param guild_ids: List of Guild ID of where the command will be used. Default ``None``, which will be global command.\n :type guild_ids: List[int]\n :param options: Options of the slash command. This will affect ``auto_convert`` and command data at Discord API. Default ``None``.\n :type options: list\n :param has_subcommands: Whether it has subcommand. 
Default ``False``.\n :type has_subcommands: bool\n \"\"\"\n name = name or cmd.__name__\n name = name.lower()\n if name in self.commands:\n tgt = self.commands[name]\n if not tgt.has_subcommands:\n raise error.DuplicateCommand(name)\n has_subcommands = tgt.has_subcommands\n for x in tgt.allowed_guild_ids:\n if x not in guild_ids:\n guild_ids.append(x)\n\n description = description or getdoc(cmd)\n\n if options is None:\n options = manage_commands.generate_options(cmd, description)\n\n if options:\n auto_convert = manage_commands.generate_auto_convert(options)\n\n _cmd = {\n \"func\": cmd,\n \"description\": description,\n \"auto_convert\": auto_convert,\n \"guild_ids\": guild_ids,\n \"api_options\": options,\n \"has_subcommands\": has_subcommands\n }\n self.commands[name] = model.CommandObject(name, _cmd)\n self.logger.debug(f\"Added command `{name}`\")\n\n def add_subcommand(self,\n cmd,\n base,\n subcommand_group=None,\n name=None,\n description: str = None,\n base_description: str = None,\n subcommand_group_description: str = None,\n auto_convert: dict = None,\n guild_ids: typing.List[int] = None,\n options: list = None):\n \"\"\"\n Registers subcommand to SlashCommand.\n\n :param cmd: Subcommand Coroutine.\n :type cmd: Coroutine\n :param base: Name of the base command.\n :type base: str\n :param subcommand_group: Name of the subcommand group, if any. Default ``None`` which represents there is no sub group.\n :type subcommand_group: str\n :param name: Name of the subcommand. Default name of the coroutine.\n :type name: str\n :param description: Description of the subcommand. Defaults to command docstring or ``None``.\n :type description: str\n :param base_description: Description of the base command. Default ``None``.\n :type base_description: str\n :param subcommand_group_description: Description of the subcommand_group. Default ``None``.\n :type subcommand_group_description: str\n :param auto_convert: Dictionary of how to convert option values. Default ``None``.\n :type auto_convert: dict\n :param guild_ids: List of guild ID of where the command will be used. Default ``None``, which will be global command.\n :type guild_ids: List[int]\n :param options: Options of the subcommand. This will affect ``auto_convert`` and command data at Discord API. 
Default ``None``.\n :type options: list\n \"\"\"\n base = base.lower()\n subcommand_group = subcommand_group.lower() if subcommand_group else subcommand_group\n name = name or cmd.__name__\n name = name.lower()\n description = description or getdoc(cmd)\n\n if base in self.commands:\n tgt = self.commands[base]\n for x in tgt.allowed_guild_ids:\n if x not in guild_ids:\n guild_ids.append(x)\n\n if options is None:\n options = manage_commands.generate_options(cmd, description)\n\n if options:\n auto_convert = manage_commands.generate_auto_convert(options)\n\n _cmd = {\n \"func\": None,\n \"description\": base_description,\n \"auto_convert\": {},\n \"guild_ids\": guild_ids,\n \"api_options\": [],\n \"has_subcommands\": True\n }\n _sub = {\n \"func\": cmd,\n \"name\": name,\n \"description\": description,\n \"base_desc\": base_description,\n \"sub_group_desc\": subcommand_group_description,\n \"auto_convert\": auto_convert,\n \"guild_ids\": guild_ids,\n \"api_options\": options\n }\n if base not in self.commands:\n self.commands[base] = model.CommandObject(base, _cmd)\n else:\n self.commands[base].has_subcommands = True\n self.commands[base].allowed_guild_ids = guild_ids\n if self.commands[base].description:\n _cmd[\"description\"] = self.commands[base].description\n if base not in self.subcommands:\n self.subcommands[base] = {}\n if subcommand_group:\n if subcommand_group not in self.subcommands[base]:\n self.subcommands[base][subcommand_group] = {}\n if name in self.subcommands[base][subcommand_group]:\n raise error.DuplicateCommand(f\"{base} {subcommand_group} {name}\")\n self.subcommands[base][subcommand_group][name] = model.SubcommandObject(_sub, base, name, subcommand_group)\n else:\n if name in self.subcommands[base]:\n raise error.DuplicateCommand(f\"{base} {name}\")\n self.subcommands[base][name] = model.SubcommandObject(_sub, base, name)\n self.logger.debug(f\"Added subcommand `{base} {subcommand_group or ''} {name or cmd.__name__}`\")\n\n def slash(self,\n *,\n name: str = None,\n description: str = None,\n auto_convert: dict = None,\n guild_id: int = None,\n guild_ids: typing.List[int] = None,\n options: typing.List[dict] = None):\n \"\"\"\n Decorator that registers coroutine as a slash command.\\n\n All decorator args must be passed as keyword-only args.\\n\n 1 arg for command coroutine is required for ctx(:class:`.model.SlashContext`),\n and if your slash command has some args, then those args are also required.\\n\n All args must be passed as keyword-args.\n\n .. note::\n Role, User, and Channel types are passed as id if you don't set ``auto_convert``, since API doesn't give type of the option for now.\\n\n Also, if ``options`` is passed, then ``auto_convert`` will be automatically created or overrided.\n\n .. warning::\n Unlike discord.py's command, ``*args``, keyword-only args, converters, etc. are NOT supported.\n\n Example:\n\n .. code-block:: python\n\n @slash.slash(name=\"ping\")\n async def _slash(ctx): # Normal usage.\n await ctx.send(content=f\"Pong! (`{round(bot.latency*1000)}`ms)\")\n\n\n @slash.slash(name=\"pick\")\n async def _pick(ctx, choice1, choice2): # Command with 1 or more args.\n await ctx.send(content=str(random.choice([choice1, choice2])))\n\n Example of formatting ``auto_convert``:\n\n .. 
code-block:: python\n\n {\"option_role\": \"role\", # For key put name of the option and for value put type of the option.\n \"option_user\": SlashCommandOptionType.USER, # Also can use an enumeration member for the type\n \"option_user_two\": 6, # or number\n \"option_channel\": \"CHANNEL\"} # or upper case string.\n\n :param name: Name of the slash command. Default name of the coroutine.\n :type name: str\n :param description: Description of the slash command. Default ``None``.\n :type description: str\n :param auto_convert: Dictionary of how to convert option values. Default ``None``.\n :type auto_convert: dict\n :param guild_id: Deprecated. Use ``guild_ids`` instead.\n :type guild_id: int\n :param guild_ids: List of Guild ID of where the command will be used. Default ``None``, which will be global command.\n :type guild_ids: List[int]\n :param options: Options of the slash command. This will affect ``auto_convert`` and command data at Discord API. Default ``None``.\n :type options: List[dict]\n \"\"\"\n if guild_id:\n self.logger.warning(\"`guild_id` is deprecated! `Use guild_ids` instead.\")\n guild_ids = [guild_id]\n\n def wrapper(cmd):\n self.add_slash_command(cmd, name, description, auto_convert, guild_ids, options)\n return cmd\n\n return wrapper\n\n def subcommand(self,\n *,\n base,\n subcommand_group=None,\n name=None,\n description: str = None,\n base_description: str = None,\n base_desc: str = None,\n subcommand_group_description: str = None,\n sub_group_desc: str = None,\n auto_convert: dict = None,\n guild_ids: typing.List[int] = None,\n options: typing.List[dict] = None):\n \"\"\"\n Decorator that registers subcommand.\\n\n Unlike discord.py, you don't need base command.\\n\n All args must be passed as keyword-args.\n\n Example:\n\n .. code-block:: python\n\n # /group say <str>\n @slash.subcommand(base=\"group\", name=\"say\")\n async def _group_say(ctx, _str):\n await ctx.send(content=_str)\n\n # /group kick user <user>\n @slash.subcommand(base=\"group\",\n subcommand_group=\"kick\",\n name=\"user\",\n auto_convert={\"user\": \"user\"})\n async def _group_kick_user(ctx, user):\n ...\n\n :param base: Name of the base command.\n :type base: str\n :param subcommand_group: Name of the subcommand group, if any. Default ``None`` which represents there is no sub group.\n :type subcommand_group: str\n :param name: Name of the subcommand. Default name of the coroutine.\n :type name: str\n :param description: Description of the subcommand. Default ``None``.\n :type description: str\n :param base_description: Description of the base command. Default ``None``.\n :type base_description: str\n :param base_desc: Alias of ``base_description``.\n :param subcommand_group_description: Description of the subcommand_group. Default ``None``.\n :type subcommand_group_description: str\n :param sub_group_desc: Alias of ``subcommand_group_description``.\n :param auto_convert: Dictionary of how to convert option values. Default ``None``.\n :type auto_convert: dict\n :param guild_ids: List of guild ID of where the command will be used. Default ``None``, which will be global command.\n :type guild_ids: List[int]\n :param options: Options of the subcommand. This will affect ``auto_convert`` and command data at Discord API. 
Default ``None``.\n :type options: List[dict]\n \"\"\"\n base_description = base_description or base_desc\n subcommand_group_description = subcommand_group_description or sub_group_desc\n\n def wrapper(cmd):\n self.add_subcommand(cmd, base, subcommand_group, name, description, base_description, subcommand_group_description, auto_convert, guild_ids, options)\n return cmd\n\n return wrapper\n\n async def process_options(self, guild: discord.Guild, options: list, auto_convert: dict) -> list:\n \"\"\"\n Processes Role, User, and Channel option types to discord.py's models.\n\n :param guild: Guild of the command message.\n :type guild: discord.Guild\n :param options: Dict of options.\n :type options: list\n :param auto_convert: Dictionary of how to convert option values.\n :type auto_convert: dict\n :return: list\n \"\"\"\n if not guild:\n self.logger.info(\"This command invoke is missing guild. Skipping option process.\")\n return [x[\"value\"] for x in options]\n\n if not isinstance(guild, discord.Guild):\n return [x[\"value\"] for x in options]\n\n if not auto_convert:\n return [x[\"value\"] for x in options]\n\n converters = [\n [guild.get_member, guild.fetch_member],\n guild.get_channel,\n guild.get_role\n ]\n\n types = {\n \"user\": 0,\n \"USER\": 0,\n model.SlashCommandOptionType.USER: 0,\n \"6\": 0,\n 6: 0,\n \"channel\": 1,\n \"CHANNEL\": 1,\n model.SlashCommandOptionType.CHANNEL: 1,\n \"7\": 1,\n 7: 1,\n \"role\": 2,\n \"ROLE\": 2,\n model.SlashCommandOptionType.ROLE: 2,\n 8: 2,\n \"8\": 2\n }\n\n to_return = []\n\n for x in options:\n selected = x\n if selected[\"name\"] in auto_convert:\n if auto_convert[selected[\"name\"]] not in types:\n to_return.append(selected[\"value\"])\n continue\n loaded_converter = converters[types[auto_convert[selected[\"name\"]]]]\n if isinstance(loaded_converter, list):\n cache_first = loaded_converter[0](int(selected[\"value\"]))\n if cache_first:\n to_return.append(cache_first)\n continue\n loaded_converter = loaded_converter[1]\n try:\n to_return.append(await loaded_converter(int(selected[\"value\"]))) \\\n if iscoroutinefunction(loaded_converter) else \\\n to_return.append(loaded_converter(int(selected[\"value\"])))\n except (discord.Forbidden, discord.HTTPException):\n self.logger.warning(\"Failed fetching user! Passing ID instead.\")\n to_return.append(int(selected[\"value\"]))\n return to_return\n\n async def on_socket_response(self, msg):\n \"\"\"\n This event listener is automatically registered at initialization of this class.\n\n .. 
warning::\n DO NOT MANUALLY REGISTER, OVERRIDE, OR WHATEVER ACTION TO THIS COROUTINE UNLESS YOU KNOW WHAT YOU ARE DOING.\n\n :param msg: Gateway message.\n \"\"\"\n if msg[\"t\"] != \"INTERACTION_CREATE\":\n return\n\n to_use = msg[\"d\"]\n\n if to_use[\"data\"][\"name\"] in self.commands:\n\n ctx = context.SlashContext(self.req, to_use, self._discord, self.logger)\n cmd_name = to_use[\"data\"][\"name\"]\n\n if cmd_name not in self.commands and cmd_name in self.subcommands:\n return await self.handle_subcommand(ctx, to_use)\n\n selected_cmd = self.commands[to_use[\"data\"][\"name\"]]\n\n if selected_cmd.allowed_guild_ids:\n guild_id = ctx.guild.id if isinstance(ctx.guild, discord.Guild) else ctx.guild\n\n if guild_id not in selected_cmd.allowed_guild_ids:\n return\n\n if selected_cmd.has_subcommands and not selected_cmd.func:\n return await self.handle_subcommand(ctx, to_use)\n\n if \"options\" in to_use[\"data\"]:\n for x in to_use[\"data\"][\"options\"]:\n if \"value\" not in x:\n return await self.handle_subcommand(ctx, to_use)\n\n args = await self.process_options(ctx.guild, to_use[\"data\"][\"options\"], selected_cmd.auto_convert) \\\n if \"options\" in to_use[\"data\"] else []\n\n self._discord.dispatch(\"slash_command\", ctx)\n\n try:\n await selected_cmd.invoke(ctx, *args)\n except Exception as ex:\n await self.on_slash_command_error(ctx, ex)\n\n async def handle_subcommand(self, ctx: context.SlashContext, data: dict):\n \"\"\"\n Coroutine for handling subcommand.\n\n .. warning::\n Do not manually call this.\n\n :param ctx: :class:`.model.SlashContext` instance.\n :param data: Gateway message.\n \"\"\"\n if data[\"data\"][\"name\"] not in self.subcommands:\n return\n base = self.subcommands[data[\"data\"][\"name\"]]\n sub = data[\"data\"][\"options\"][0]\n sub_name = sub[\"name\"]\n if sub_name not in base:\n return\n ctx.subcommand = sub_name\n sub_opts = sub[\"options\"] if \"options\" in sub else []\n for x in sub_opts:\n if \"options\" in x or \"value\" not in x:\n sub_group = x[\"name\"]\n if sub_group not in base[sub_name]:\n return\n ctx.subcommand_group = sub_group\n selected = base[sub_name][sub_group]\n args = await self.process_options(ctx.guild, x[\"options\"], selected.auto_convert) \\\n if \"options\" in x else []\n self._discord.dispatch(\"slash_command\", ctx)\n try:\n await selected.invoke(ctx, *args)\n except Exception as ex:\n await self.on_slash_command_error(ctx, ex)\n return\n selected = base[sub_name]\n args = await self.process_options(ctx.guild, sub_opts, selected.auto_convert) \\\n if \"options\" in sub else []\n self._discord.dispatch(\"slash_command\", ctx)\n try:\n await selected.invoke(ctx, *args)\n except Exception as ex:\n await self.on_slash_command_error(ctx, ex)\n\n async def on_slash_command_error(self, ctx, ex):\n \"\"\"\n Handles Exception occurred from invoking command.\n\n Example of adding event:\n\n .. code-block:: python\n\n @client.event\n async def on_slash_command_error(ctx, ex):\n ...\n\n Example of adding listener:\n\n .. 
code-block:: python\n\n @bot.listen()\n async def on_slash_command_error(ctx, ex):\n ...\n\n :param ctx: Context of the command.\n :type ctx: :class:`.model.SlashContext`\n :param ex: Exception from the command invoke.\n :type ex: Exception\n :return:\n \"\"\"\n if self.has_listener:\n if self._discord.extra_events.get('on_slash_command_error'):\n self._discord.dispatch(\"slash_command_error\", ctx, ex)\n return\n if hasattr(self._discord, \"on_slash_command_error\"):\n self._discord.dispatch(\"slash_command_error\", ctx, ex)\n return\n # Prints exception if not overrided or has no listener for error.\n self.logger.exception(f\"An exception has occurred while executing command `{ctx.name}`:\")\n",
"path": "discord_slash/client.py"
}
] | diff --git a/discord_slash/client.py b/discord_slash/client.py
index 1e55ddf6e..8818e166d 100644
--- a/discord_slash/client.py
+++ b/discord_slash/client.py
@@ -425,8 +425,8 @@ def add_subcommand(self,
name = name.lower()
description = description or getdoc(cmd)
- if name in self.commands:
- tgt = self.commands[name]
+ if base in self.commands:
+ tgt = self.commands[base]
for x in tgt.allowed_guild_ids:
if x not in guild_ids:
guild_ids.append(x)
| Subcommands are registered to guilds where they are not allowed
I've noticed that when I use `guild_ids` on a subcommand of a command that is shared between multiple guilds, the subcommand is registered to every guild where any of that command's subcommands is allowed.
## Steps
1. Register a subcommand `s1` for command `c` for **Guild A**:
```python
@slash.subcommand(
base="c",
name="s1",
guild_ids=[GUILD_A_ID],
)
async def _handle(ctx):
# ....
```
2. Register a subcommand `s2` for command `c` for **Guild B**:
```python
@slash.subcommand(
base="c",
name="s2",
guild_ids=[GUILD_B_ID],
)
async def _handle(ctx):
# ....
```
## Expected behavior
**Guild A** has only the `/c s1` command and **Guild B** has only the `/c s2` command.
## Actual behavior
**Guild A** has `/c s1` and `/c s2` but can only use `/c s1`, and **Guild B** has `/c s1` and `/c s2` but can only use `/c s2`.
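## Note on the fix
The patch above changes `add_subcommand` to look up `base` instead of the subcommand's `name` when merging previously allowed guild IDs. The sketch below only illustrates that merge step under simplified assumptions; the standalone helper, its name, and the plain-dict `commands` argument are illustrative and not part of the library's API:
```python
# Illustrative sketch of the guild-ID merge keyed on the base command name.
# `commands` is assumed to map command names to objects exposing
# `allowed_guild_ids`, similar to the library's CommandObject.
def merge_allowed_guild_ids(commands, base, guild_ids):
    merged = list(guild_ids or [])   # copy so the caller's list is untouched
    existing = commands.get(base)    # the fix keys this lookup on `base`
    if existing is not None:
        for gid in existing.allowed_guild_ids:
            if gid not in merged:
                merged.append(gid)
    return merged
```
Keying the lookup on `base` means the merge only ever pulls guild IDs that belong to the same base command, rather than whatever top-level command happens to share the subcommand's name.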
|
translate__pootle-5899 | [
{
"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.contenttypes.fields import GenericRelation\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\n\nfrom pootle.core.delegate import data_tool\nfrom pootle.core.mixins import CachedTreeItem\nfrom pootle.core.url_helpers import get_editor_filter, split_pootle_path\nfrom pootle_app.models.directory import Directory\nfrom pootle_app.project_tree import (does_not_exist, init_store_from_template,\n translation_project_dir_exists)\nfrom pootle_format.models import Format\nfrom pootle_language.models import Language\nfrom pootle_misc.checks import excluded_filters\nfrom pootle_project.models import Project\nfrom pootle_revision.models import Revision\nfrom pootle_store.constants import PARSED\nfrom pootle_store.util import absolute_real_path, relative_real_path\nfrom staticpages.models import StaticPage\n\n\ndef create_or_resurrect_translation_project(language, project):\n tp = create_translation_project(language, project)\n if tp is not None:\n if tp.directory.obsolete:\n tp.directory.obsolete = False\n tp.directory.save()\n logging.info(u\"Resurrected %s\", tp)\n else:\n logging.info(u\"Created %s\", tp)\n\n\ndef create_translation_project(language, project):\n if translation_project_dir_exists(language, project):\n try:\n translation_project, __ = TranslationProject.objects.all() \\\n .get_or_create(language=language, project=project)\n return translation_project\n except OSError:\n return None\n except IndexError:\n return None\n\n\ndef scan_translation_projects(languages=None, projects=None):\n project_query = Project.objects.all()\n\n if projects is not None:\n project_query = project_query.filter(code__in=projects)\n\n for project in project_query.iterator():\n if does_not_exist(project.get_real_path()):\n logging.info(u\"Disabling %s\", project)\n project.disabled = True\n project.save()\n else:\n lang_query = Language.objects.exclude(\n id__in=project.translationproject_set.live().values_list('language',\n flat=True))\n if languages is not None:\n lang_query = lang_query.filter(code__in=languages)\n\n for language in lang_query.iterator():\n create_or_resurrect_translation_project(language, project)\n\n\nclass TranslationProjectManager(models.Manager):\n\n def get_terminology_project(self, language_id):\n # FIXME: the code below currently uses the same approach to determine\n # the 'terminology' kind of a project as 'Project.is_terminology()',\n # which means it checks the value of 'checkstyle' field\n # (see pootle_project/models.py:240).\n #\n # This should probably be replaced in the future with a dedicated\n # project property.\n return self.get(language=language_id,\n project__checkstyle='terminology')\n\n def live(self):\n \"\"\"Filters translation projects that have non-obsolete directories.\"\"\"\n return self.filter(directory__obsolete=False)\n\n def for_user(self, user, select_related=None):\n \"\"\"Filters translation projects for a specific user.\n\n - Admins always get all translation projects.\n - Regular users only get enabled translation projects\n accessible to 
them.\n\n :param user: The user for whom the translation projects need to be\n retrieved for.\n :return: A filtered queryset with `TranslationProject`s for `user`.\n \"\"\"\n qs = self.live()\n if select_related is not None:\n qs = qs.select_related(*select_related)\n\n if user.is_superuser:\n return qs\n\n return qs.filter(\n project__disabled=False,\n project__code__in=Project.accessible_by_user(user))\n\n def get_for_user(self, user, project_code, language_code,\n select_related=None):\n \"\"\"Gets a `language_code`/`project_code` translation project\n for a specific `user`.\n\n - Admins can get the translation project even\n if its project is disabled.\n - Regular users only get a translation project\n if its project isn't disabled and it is accessible to them.\n\n :param user: The user for whom the translation project needs\n to be retrieved.\n :param project_code: The code of a project for the TP to retrieve.\n :param language_code: The code of the language fro the TP to retrieve.\n :return: The `TranslationProject` matching the params, raises\n otherwise.\n \"\"\"\n return self.for_user(\n user, select_related).get(\n project__code=project_code,\n language__code=language_code)\n\n\nclass TranslationProject(models.Model, CachedTreeItem):\n\n language = models.ForeignKey(\n Language, db_index=True, on_delete=models.CASCADE)\n project = models.ForeignKey(\n Project, db_index=True, on_delete=models.CASCADE)\n real_path = models.FilePathField(editable=False, null=True, blank=True)\n directory = models.OneToOneField(\n Directory, db_index=True, editable=False, on_delete=models.CASCADE)\n pootle_path = models.CharField(max_length=255, null=False, unique=True,\n db_index=True, editable=False)\n creation_time = models.DateTimeField(auto_now_add=True, db_index=True,\n editable=False, null=True)\n revisions = GenericRelation(Revision)\n\n objects = TranslationProjectManager()\n\n class Meta(object):\n unique_together = (\n ('language', 'project'),\n ('project', 'language'))\n db_table = 'pootle_app_translationproject'\n # disabled objects are hidden for related objects too\n base_manager_name = 'objects'\n\n @cached_property\n def code(self):\n return u'-'.join([self.language.code, self.project.code])\n\n @cached_property\n def data_tool(self):\n return data_tool.get(self.__class__)(self)\n\n # # # # # # # # # # # # # # Properties # # # # # # # # # # # # # # # # # #\n\n @property\n def name(self):\n # TODO: See if `self.fullname` can be removed\n return self.fullname\n\n @property\n def fullname(self):\n return \"%s [%s]\" % (self.project.fullname, self.language.name)\n\n @property\n def abs_real_path(self):\n if self.real_path is not None:\n return absolute_real_path(self.real_path)\n\n @abs_real_path.setter\n def abs_real_path(self, value):\n if value is not None:\n self.real_path = relative_real_path(value)\n else:\n self.real_path = None\n\n @property\n def file_style(self):\n return self.project.get_treestyle()\n\n @property\n def checker(self):\n from translate.filters import checks\n # We do not use default Translate Toolkit checkers; instead use\n # our own one\n if settings.POOTLE_QUALITY_CHECKER:\n from pootle_misc.util import import_func\n checkerclasses = [import_func(settings.POOTLE_QUALITY_CHECKER)]\n else:\n checkerclasses = [\n checks.projectcheckers.get(self.project.checkstyle,\n checks.StandardChecker)\n ]\n\n return checks.TeeChecker(checkerclasses=checkerclasses,\n excludefilters=excluded_filters,\n errorhandler=self.filtererrorhandler,\n 
languagecode=self.language.code)\n\n @property\n def disabled(self):\n return self.project.disabled\n\n @property\n def is_template_project(self):\n return self == self.project.get_template_translationproject()\n\n # # # # # # # # # # # # # # Methods # # # # # # # # # # # # # # # # # # #\n\n def __unicode__(self):\n return self.pootle_path\n\n def __init__(self, *args, **kwargs):\n super(TranslationProject, self).__init__(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.directory = self.language.directory \\\n .get_or_make_subdir(self.project.code)\n self.pootle_path = self.directory.pootle_path\n\n if self.project.treestyle != 'pootle_fs':\n from pootle_app.project_tree import get_translation_project_dir\n self.abs_real_path = get_translation_project_dir(\n self.language, self.project, self.file_style, make_dirs=not\n self.directory.obsolete)\n else:\n self.abs_real_path = None\n super(TranslationProject, self).save(*args, **kwargs)\n if self.directory.tp_id != self.pk:\n self.directory.tp = self\n self.directory.save()\n\n def delete(self, *args, **kwargs):\n directory = self.directory\n\n super(TranslationProject, self).delete(*args, **kwargs)\n directory.delete()\n\n def get_absolute_url(self):\n return reverse(\n 'pootle-tp-browse',\n args=split_pootle_path(self.pootle_path)[:-1])\n\n def get_translate_url(self, **kwargs):\n return u''.join(\n [reverse(\"pootle-tp-translate\",\n args=split_pootle_path(self.pootle_path)[:-1]),\n get_editor_filter(**kwargs)])\n\n def get_announcement(self, user=None):\n \"\"\"Return the related announcement, if any.\"\"\"\n return StaticPage.get_announcement_for(self.pootle_path, user)\n\n def filtererrorhandler(self, functionname, str1, str2, e):\n logging.error(u\"Error in filter %s: %r, %r, %s\", functionname, str1,\n str2, e)\n return False\n\n def is_accessible_by(self, user):\n \"\"\"Returns `True` if the current translation project is accessible\n by `user`.\n \"\"\"\n if user.is_superuser:\n return True\n\n return self.project.code in Project.accessible_by_user(user)\n\n def can_be_inited_from_templates(self):\n \"\"\"Returns `True` if the current translation project hasn't been\n saved yet and can be initialized from templates.\n \"\"\"\n\n # This method checks if the current translation project directory\n # doesn't exist. 
So it won't work if the translation project is already\n # saved the database because the translation project directory is\n # auto-created in `save()` method.\n template_tp = self.project.get_template_translationproject()\n return (\n not self.is_template_project\n and template_tp is not None\n and not translation_project_dir_exists(self.language,\n self.project))\n\n def init_from_templates(self):\n \"\"\"Initializes the current translation project files using\n the templates TP ones.\n \"\"\"\n\n template_tp = self.project.get_template_translationproject()\n template_stores = template_tp.stores.live().exclude(file=\"\")\n\n for template_store in template_stores.iterator():\n init_store_from_template(self, template_store)\n\n self.update_from_disk()\n\n def update_from_disk(self, force=False, overwrite=False):\n \"\"\"Update all stores to reflect state on disk.\"\"\"\n changed = False\n\n logging.info(u\"Scanning for new files in %s\", self)\n # Create new, make obsolete in-DB stores to reflect state on disk\n self.scan_files()\n\n stores = self.stores.live().select_related('parent').exclude(file='')\n # Update store content from disk store\n for store in stores.iterator():\n if not store.file:\n continue\n disk_mtime = store.get_file_mtime()\n if not force and disk_mtime == store.file_mtime:\n # The file on disk wasn't changed since the last sync\n logging.debug(u\"File didn't change since last sync, \"\n u\"skipping %s\", store.pootle_path)\n continue\n\n changed = (\n store.updater.update_from_disk(overwrite=overwrite)\n or changed)\n\n return changed\n\n def sync(self, conservative=True, skip_missing=False, only_newer=True):\n \"\"\"Sync unsaved work on all stores to disk\"\"\"\n stores = self.stores.live().exclude(file='').filter(state__gte=PARSED)\n for store in stores.select_related(\"parent\").iterator():\n store.sync(update_structure=not conservative,\n conservative=conservative,\n skip_missing=skip_missing, only_newer=only_newer)\n\n # # # TreeItem\n def get_children(self):\n return self.directory.children\n\n def get_parents(self):\n return [self.project]\n\n # # # /TreeItem\n\n def directory_exists_on_disk(self):\n \"\"\"Checks if the actual directory for the translation project\n exists on disk.\n \"\"\"\n return not does_not_exist(self.abs_real_path)\n\n def scan_files(self):\n \"\"\"Scans the file system and returns a list of translation files.\n \"\"\"\n projects = [p.strip() for p in self.project.ignoredfiles.split(',')]\n ignored_files = set(projects)\n\n filetypes = self.project.filetype_tool\n exts = filetypes.filetype_extensions\n\n # Scan for pots if template project\n if self.is_template_project:\n exts = filetypes.template_extensions\n\n from pootle_app.project_tree import (add_files,\n match_template_filename,\n direct_language_match_filename)\n\n all_files = []\n new_files = []\n\n if self.file_style == 'gnu':\n if self.pootle_path.startswith('/templates/'):\n file_filter = lambda filename: match_template_filename(\n self.project, filename,)\n else:\n file_filter = lambda filename: direct_language_match_filename(\n self.language.code, filename,)\n else:\n file_filter = lambda filename: True\n\n all_files, new_files, __ = add_files(\n self,\n ignored_files,\n exts,\n self.real_path,\n self.directory,\n file_filter,\n )\n\n return all_files, new_files\n\n ###########################################################################\n\n\n@receiver(post_save, sender=Project)\ndef scan_languages(**kwargs):\n instance = kwargs[\"instance\"]\n created = 
kwargs.get(\"created\", False)\n raw = kwargs.get(\"raw\", False)\n\n if not created or raw or instance.disabled:\n return\n\n if not instance.filetypes.all().exists():\n instance.filetypes.add(Format.objects.get(name=\"po\"))\n\n if instance.treestyle == 'pootle_fs':\n return\n\n for language in Language.objects.iterator():\n tp = create_translation_project(language, instance)\n if tp is not None:\n tp.update_from_disk()\n\n\n@receiver(post_save, sender=Language)\ndef scan_projects(**kwargs):\n instance = kwargs[\"instance\"]\n created = kwargs.get(\"created\", False)\n raw = kwargs.get(\"raw\", False)\n\n if not created or raw:\n return\n\n for project in Project.objects.enabled().iterator():\n tp = create_translation_project(instance, project)\n if tp is not None:\n tp.update_from_disk()\n",
"path": "pootle/apps/pootle_translationproject/models.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n#\n# Copyright (C) Pootle contributors.\n#\n# This file is a part of the Pootle project. It is distributed under the GPL3\n# or later license. See the LICENSE file for a copy of the license and the\n# AUTHORS file for copyright and authorship information.\n\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.contenttypes.fields import GenericRelation\nfrom django.db import models\nfrom django.db.models.signals import post_save\nfrom django.dispatch import receiver\nfrom django.urls import reverse\nfrom django.utils.functional import cached_property\n\nfrom pootle.core.delegate import data_tool\nfrom pootle.core.mixins import CachedTreeItem\nfrom pootle.core.url_helpers import get_editor_filter, split_pootle_path\nfrom pootle_app.models.directory import Directory\nfrom pootle_app.project_tree import (does_not_exist, init_store_from_template,\n translation_project_dir_exists)\nfrom pootle_format.models import Format\nfrom pootle_language.models import Language\nfrom pootle_misc.checks import excluded_filters\nfrom pootle_project.models import Project\nfrom pootle_revision.models import Revision\nfrom pootle_store.constants import PARSED\nfrom pootle_store.util import absolute_real_path, relative_real_path\nfrom staticpages.models import StaticPage\n\n\ndef create_or_resurrect_translation_project(language, project):\n tp = create_translation_project(language, project)\n if tp is not None:\n if tp.directory.obsolete:\n tp.directory.obsolete = False\n tp.directory.save()\n logging.info(u\"Resurrected %s\", tp)\n else:\n logging.info(u\"Created %s\", tp)\n\n\ndef create_translation_project(language, project):\n if translation_project_dir_exists(language, project):\n try:\n translation_project, __ = TranslationProject.objects.all() \\\n .get_or_create(language=language, project=project)\n return translation_project\n except OSError:\n return None\n except IndexError:\n return None\n\n\ndef scan_translation_projects(languages=None, projects=None):\n project_query = Project.objects.all()\n\n if projects is not None:\n project_query = project_query.filter(code__in=projects)\n\n for project in project_query.iterator():\n if does_not_exist(project.get_real_path()):\n logging.info(u\"Disabling %s\", project)\n project.disabled = True\n project.save()\n else:\n lang_query = Language.objects.exclude(\n id__in=project.translationproject_set.live().values_list('language',\n flat=True))\n if languages is not None:\n lang_query = lang_query.filter(code__in=languages)\n\n for language in lang_query.iterator():\n create_or_resurrect_translation_project(language, project)\n\n\nclass TranslationProjectManager(models.Manager):\n\n def get_terminology_project(self, language_id):\n # FIXME: the code below currently uses the same approach to determine\n # the 'terminology' kind of a project as 'Project.is_terminology()',\n # which means it checks the value of 'checkstyle' field\n # (see pootle_project/models.py:240).\n #\n # This should probably be replaced in the future with a dedicated\n # project property.\n return self.get(language=language_id,\n project__checkstyle='terminology')\n\n def live(self):\n \"\"\"Filters translation projects that have non-obsolete directories.\"\"\"\n return self.filter(directory__obsolete=False)\n\n def for_user(self, user, select_related=None):\n \"\"\"Filters translation projects for a specific user.\n\n - Admins always get all translation projects.\n - Regular users only get enabled translation projects\n accessible to 
them.\n\n :param user: The user for whom the translation projects need to be\n retrieved for.\n :return: A filtered queryset with `TranslationProject`s for `user`.\n \"\"\"\n qs = self.live()\n if select_related is not None:\n qs = qs.select_related(*select_related)\n\n if user.is_superuser:\n return qs\n\n return qs.filter(\n project__disabled=False,\n project__code__in=Project.accessible_by_user(user))\n\n def get_for_user(self, user, project_code, language_code,\n select_related=None):\n \"\"\"Gets a `language_code`/`project_code` translation project\n for a specific `user`.\n\n - Admins can get the translation project even\n if its project is disabled.\n - Regular users only get a translation project\n if its project isn't disabled and it is accessible to them.\n\n :param user: The user for whom the translation project needs\n to be retrieved.\n :param project_code: The code of a project for the TP to retrieve.\n :param language_code: The code of the language fro the TP to retrieve.\n :return: The `TranslationProject` matching the params, raises\n otherwise.\n \"\"\"\n return self.for_user(\n user, select_related).get(\n project__code=project_code,\n language__code=language_code)\n\n\nclass TranslationProject(models.Model, CachedTreeItem):\n\n language = models.ForeignKey(\n Language, db_index=True, on_delete=models.CASCADE)\n project = models.ForeignKey(\n Project, db_index=True, on_delete=models.CASCADE)\n real_path = models.FilePathField(editable=False, null=True, blank=True)\n directory = models.OneToOneField(\n Directory, db_index=True, editable=False, on_delete=models.CASCADE)\n pootle_path = models.CharField(max_length=255, null=False, unique=True,\n db_index=True, editable=False)\n creation_time = models.DateTimeField(auto_now_add=True, db_index=True,\n editable=False, null=True)\n revisions = GenericRelation(Revision)\n\n objects = TranslationProjectManager()\n\n class Meta(object):\n unique_together = (\n ('language', 'project'),\n ('project', 'language'))\n db_table = 'pootle_app_translationproject'\n # disabled objects are hidden for related objects too\n base_manager_name = 'objects'\n\n @cached_property\n def code(self):\n return u'-'.join([self.language.code, self.project.code])\n\n @cached_property\n def data_tool(self):\n return data_tool.get(self.__class__)(self)\n\n # # # # # # # # # # # # # # Properties # # # # # # # # # # # # # # # # # #\n\n @property\n def name(self):\n # TODO: See if `self.fullname` can be removed\n return self.fullname\n\n @property\n def fullname(self):\n return \"%s [%s]\" % (self.project.fullname, self.language.name)\n\n @property\n def abs_real_path(self):\n if self.real_path is not None:\n return absolute_real_path(self.real_path)\n\n @abs_real_path.setter\n def abs_real_path(self, value):\n if value is not None:\n self.real_path = relative_real_path(value)\n else:\n self.real_path = None\n\n @property\n def file_style(self):\n return self.project.get_treestyle()\n\n @property\n def checker(self):\n from translate.filters import checks\n # We do not use default Translate Toolkit checkers; instead use\n # our own one\n if settings.POOTLE_QUALITY_CHECKER:\n from pootle_misc.util import import_func\n checkerclasses = [import_func(settings.POOTLE_QUALITY_CHECKER)]\n else:\n checkerclasses = [\n checks.projectcheckers.get(self.project.checkstyle,\n checks.StandardChecker)\n ]\n\n return checks.TeeChecker(checkerclasses=checkerclasses,\n excludefilters=excluded_filters,\n errorhandler=self.filtererrorhandler,\n 
languagecode=self.language.code)\n\n @property\n def disabled(self):\n return self.project.disabled\n\n @property\n def is_template_project(self):\n return self == self.project.get_template_translationproject()\n\n # # # # # # # # # # # # # # Methods # # # # # # # # # # # # # # # # # # #\n\n def __unicode__(self):\n return self.pootle_path\n\n def __init__(self, *args, **kwargs):\n super(TranslationProject, self).__init__(*args, **kwargs)\n\n def save(self, *args, **kwargs):\n self.directory = self.language.directory \\\n .get_or_make_subdir(self.project.code)\n self.pootle_path = self.directory.pootle_path\n\n if self.project.treestyle != 'pootle_fs':\n from pootle_app.project_tree import get_translation_project_dir\n self.abs_real_path = get_translation_project_dir(\n self.language, self.project, self.file_style, make_dirs=not\n self.directory.obsolete)\n else:\n self.abs_real_path = None\n super(TranslationProject, self).save(*args, **kwargs)\n if self.directory.tp_id != self.pk:\n self.directory.tp = self\n self.directory.save()\n\n def delete(self, *args, **kwargs):\n directory = self.directory\n\n super(TranslationProject, self).delete(*args, **kwargs)\n directory.delete()\n\n def get_absolute_url(self):\n return reverse(\n 'pootle-tp-browse',\n args=split_pootle_path(self.pootle_path)[:-1])\n\n def get_translate_url(self, **kwargs):\n return u''.join(\n [reverse(\"pootle-tp-translate\",\n args=split_pootle_path(self.pootle_path)[:-1]),\n get_editor_filter(**kwargs)])\n\n def get_announcement(self, user=None):\n \"\"\"Return the related announcement, if any.\"\"\"\n return StaticPage.get_announcement_for(self.pootle_path, user)\n\n def filtererrorhandler(self, functionname, str1, str2, e):\n logging.error(u\"Error in filter %s: %r, %r, %s\", functionname, str1,\n str2, e)\n return False\n\n def is_accessible_by(self, user):\n \"\"\"Returns `True` if the current translation project is accessible\n by `user`.\n \"\"\"\n if user.is_superuser:\n return True\n\n return self.project.code in Project.accessible_by_user(user)\n\n def can_be_inited_from_templates(self):\n \"\"\"Returns `True` if the current translation project hasn't been\n saved yet and can be initialized from templates.\n \"\"\"\n\n # This method checks if the current translation project directory\n # doesn't exist. 
So it won't work if the translation project is already\n # saved the database because the translation project directory is\n # auto-created in `save()` method.\n template_tp = self.project.get_template_translationproject()\n return (\n not self.is_template_project\n and template_tp is not None\n and not translation_project_dir_exists(self.language,\n self.project))\n\n def init_from_templates(self):\n \"\"\"Initializes the current translation project files using\n the templates TP ones.\n \"\"\"\n\n template_tp = self.project.get_template_translationproject()\n template_stores = template_tp.stores.live().exclude(file=\"\")\n\n for template_store in template_stores.iterator():\n init_store_from_template(self, template_store)\n\n self.update_from_disk()\n\n def update_from_disk(self, force=False, overwrite=False):\n \"\"\"Update all stores to reflect state on disk.\"\"\"\n changed = False\n\n logging.info(u\"Scanning for new files in %s\", self)\n # Create new, make obsolete in-DB stores to reflect state on disk\n self.scan_files()\n\n stores = self.stores.live().select_related('parent').exclude(file='')\n # Update store content from disk store\n for store in stores.iterator():\n if not store.file:\n continue\n disk_mtime = store.get_file_mtime()\n if not force and disk_mtime == store.file_mtime:\n # The file on disk wasn't changed since the last sync\n logging.debug(u\"File didn't change since last sync, \"\n u\"skipping %s\", store.pootle_path)\n continue\n\n changed = (\n store.updater.update_from_disk(overwrite=overwrite)\n or changed)\n\n return changed\n\n def sync(self, conservative=True, skip_missing=False, only_newer=True):\n \"\"\"Sync unsaved work on all stores to disk\"\"\"\n stores = self.stores.live().exclude(file='').filter(state__gte=PARSED)\n for store in stores.select_related(\"parent\").iterator():\n store.sync(update_structure=not conservative,\n conservative=conservative,\n skip_missing=skip_missing, only_newer=only_newer)\n\n # # # TreeItem\n def get_children(self):\n return self.directory.children\n\n def get_parents(self):\n return [self.project]\n\n # # # /TreeItem\n\n def directory_exists_on_disk(self):\n \"\"\"Checks if the actual directory for the translation project\n exists on disk.\n \"\"\"\n return not does_not_exist(self.abs_real_path)\n\n def scan_files(self):\n \"\"\"Scans the file system and returns a list of translation files.\n \"\"\"\n projects = [p.strip() for p in self.project.ignoredfiles.split(',')]\n ignored_files = set(projects)\n\n filetypes = self.project.filetype_tool\n exts = filetypes.filetype_extensions\n\n # Scan for pots if template project\n if self.is_template_project:\n exts = filetypes.template_extensions\n\n from pootle_app.project_tree import (add_files,\n match_template_filename,\n direct_language_match_filename)\n\n all_files = []\n new_files = []\n\n if self.file_style == 'gnu':\n if self.pootle_path.startswith('/templates/'):\n file_filter = lambda filename: match_template_filename(\n self.project, filename,)\n else:\n file_filter = lambda filename: direct_language_match_filename(\n self.language.code, filename,)\n else:\n file_filter = lambda filename: True\n\n all_files, new_files, __ = add_files(\n self,\n ignored_files,\n exts,\n self.real_path,\n self.directory,\n file_filter,\n )\n\n return all_files, new_files\n\n ###########################################################################\n\n\n@receiver(post_save, sender=Project)\ndef scan_languages(**kwargs):\n instance = kwargs[\"instance\"]\n created = 
kwargs.get(\"created\", False)\n raw = kwargs.get(\"raw\", False)\n\n if not created or raw or instance.disabled:\n return\n\n if not instance.filetypes.all().exists():\n instance.filetypes.add(Format.objects.get(name=\"po\"))\n\n if instance.treestyle == 'pootle_fs':\n return\n\n for language in Language.objects.iterator():\n tp = create_translation_project(language, instance)\n if tp is not None:\n tp.update_from_disk()\n\n\n@receiver(post_save, sender=Language)\ndef scan_projects(**kwargs):\n instance = kwargs[\"instance\"]\n created = kwargs.get(\"created\", False)\n raw = kwargs.get(\"raw\", False)\n\n if not created or raw:\n return\n\n old_style_projects = Project.objects.enabled().exclude(\n treestyle=\"pootle_fs\")\n\n for project in old_style_projects.iterator():\n tp = create_translation_project(instance, project)\n if tp is not None:\n tp.update_from_disk()\n",
"path": "pootle/apps/pootle_translationproject/models.py"
}
] | diff --git a/pootle/apps/pootle_translationproject/models.py b/pootle/apps/pootle_translationproject/models.py
index efcf865375f..496d5e89ac0 100644
--- a/pootle/apps/pootle_translationproject/models.py
+++ b/pootle/apps/pootle_translationproject/models.py
@@ -438,7 +438,10 @@ def scan_projects(**kwargs):
if not created or raw:
return
- for project in Project.objects.enabled().iterator():
+ old_style_projects = Project.objects.enabled().exclude(
+ treestyle="pootle_fs")
+
+ for project in old_style_projects.iterator():
tp = create_translation_project(instance, project)
if tp is not None:
tp.update_from_disk()
| Add language on pootle_fs/xliff support
Ubuntu 16.04
Pootle 2.8.0b3
I have successfully created a pootle_fs project.
It is based on xliff; I put in a file following my GNU-style naming "messages.<language_code>.<ext>".
I tried French and templates as languages; both are picked up, uploaded, and I can translate.
But when I add any new language, my RQ worker gives me this error for each of them:
```
[2016-09-13T10:13:35] default: pootle_project.forms.update_translation_project(<TranslationProject: /sq/messages/>, True, 'http://localhost:8000/') (cfeaa7eb-99ae-4e4f-bbab-3cfa0e96d436)
2016-09-13 10:13:55,524 INFO Scanning for new files in /sq/messages/
[2016-09-13T10:14:05] AttributeError: 'NoneType' object has no attribute 'startswith'
Traceback (most recent call last):
File "/home/valentin/dev/pootle/env/local/lib/python2.7/site-packages/rq/worker.py", line 568, in perform_job
rv = job.perform()
File "/home/valentin/dev/pootle/env/local/lib/python2.7/site-packages/rq/job.py", line 495, in perform
self._result = self.func(*self.args, **self.kwargs)
File "/home/valentin/dev/pootle/env/local/lib/python2.7/site-packages/pootle/apps/pootle_project/forms.py", line 45, in update_translation_project
raise e
AttributeError: 'NoneType' object has no attribute 'startswith'
Traceback (most recent call last):
File "/home/valentin/dev/pootle/env/local/lib/python2.7/site-packages/rq/worker.py", line 568, in perform_job
rv = job.perform()
File "/home/valentin/dev/pootle/env/local/lib/python2.7/site-packages/rq/job.py", line 495, in perform
self._result = self.func(*self.args, **self.kwargs)
File "/home/valentin/dev/pootle/env/local/lib/python2.7/site-packages/pootle/apps/pootle_project/forms.py", line 45, in update_translation_project
raise e
AttributeError: 'NoneType' object has no attribute 'startswith'
```
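
For reference, the fix recorded in the diff of this record restricts the `scan_projects` signal handler to projects not managed by pootle_fs; such projects keep their `real_path` as `None` (see `save()` in the models file above), which is presumably what makes the file scan fail with `'NoneType' object has no attribute 'startswith'`. A condensed sketch of that receiver, reusing the names from the models file above:

```python
# Condensed sketch of the change shown in the diff above: only scan
# old-style (non pootle_fs) projects when a new Language is saved.
@receiver(post_save, sender=Language)
def scan_projects(**kwargs):
    instance = kwargs["instance"]
    created = kwargs.get("created", False)
    raw = kwargs.get("raw", False)

    if not created or raw:
        return

    # pootle_fs projects keep no files under the legacy directory layout,
    # so update_from_disk() must not be called for them.
    old_style_projects = Project.objects.enabled().exclude(
        treestyle="pootle_fs")

    for project in old_style_projects.iterator():
        tp = create_translation_project(instance, project)
        if tp is not None:
            tp.update_from_disk()
```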
|
nonebot__nonebot2-430 | [
{
"content": "from datetime import datetime, timedelta\nfrom io import BytesIO\nfrom ipaddress import IPv4Address\nfrom typing import Any, Dict, List, NoReturn, Optional, Tuple, Union\n\nimport httpx\n\nfrom nonebot.config import Config\nfrom nonebot.typing import overrides\nfrom nonebot.adapters import Bot as BaseBot\nfrom nonebot.exception import ApiNotAvailable\nfrom nonebot.drivers import Driver, HTTPConnection, HTTPResponse, WebSocket\n\nfrom .config import Config as MiraiConfig\nfrom .event import Event, FriendMessage, GroupMessage, TempMessage\nfrom .message import MessageChain, MessageSegment\nfrom .utils import Log, argument_validation, catch_network_error, process_event\n\n\nclass SessionManager:\n \"\"\"Bot会话管理器, 提供API主动调用接口\"\"\"\n sessions: Dict[int, Tuple[str, datetime, httpx.AsyncClient]] = {}\n session_expiry: timedelta = timedelta(minutes=15)\n\n def __init__(self, session_key: str, client: httpx.AsyncClient):\n self.session_key, self.client = session_key, client\n\n @catch_network_error\n async def post(self,\n path: str,\n *,\n params: Optional[Dict[str, Any]] = None) -> Any:\n \"\"\"\n :说明:\n\n 以POST方式主动提交API请求\n\n :参数:\n\n * ``path: str``: 对应API路径\n * ``params: Optional[Dict[str, Any]]``: 请求参数 (无需sessionKey)\n\n :返回:\n\n - ``Dict[str, Any]``: API 返回值\n \"\"\"\n response = await self.client.post(\n path,\n json={\n **(params or {}),\n 'sessionKey': self.session_key,\n },\n timeout=3,\n )\n response.raise_for_status()\n return response.json()\n\n @catch_network_error\n async def request(self,\n path: str,\n *,\n params: Optional[Dict[str, Any]] = None) -> Any:\n \"\"\"\n :说明:\n\n 以GET方式主动提交API请求\n\n :参数:\n\n * ``path: str``: 对应API路径\n * ``params: Optional[Dict[str, Any]]``: 请求参数 (无需sessionKey)\n \"\"\"\n response = await self.client.get(\n path,\n params={\n **(params or {}),\n 'sessionKey': self.session_key,\n },\n timeout=3,\n )\n response.raise_for_status()\n return response.json()\n\n @catch_network_error\n async def upload(self, path: str, *, params: Dict[str, Any]) -> Any:\n \"\"\"\n :说明:\n\n 以表单(``multipart/form-data``)形式主动提交API请求\n\n :参数:\n\n * ``path: str``: 对应API路径\n * ``params: Dict[str, Any]``: 请求参数 (无需sessionKey)\n \"\"\"\n files = {k: v for k, v in params.items() if isinstance(v, BytesIO)}\n form = {k: v for k, v in params.items() if k not in files}\n response = await self.client.post(\n path,\n data=form,\n files=files,\n timeout=6,\n )\n response.raise_for_status()\n return response.json()\n\n @classmethod\n async def new(cls, self_id: int, *, host: IPv4Address, port: int,\n auth_key: str) -> \"SessionManager\":\n session = cls.get(self_id)\n if session is not None:\n return session\n\n client = httpx.AsyncClient(base_url=f'http://{host}:{port}')\n response = await client.post('/auth', json={'authKey': auth_key})\n response.raise_for_status()\n auth = response.json()\n assert auth['code'] == 0\n session_key = auth['session']\n response = await client.post('/verify',\n json={\n 'sessionKey': session_key,\n 'qq': self_id\n })\n assert response.json()['code'] == 0\n cls.sessions[self_id] = session_key, datetime.now(), client\n\n return cls(session_key, client)\n\n @classmethod\n def get(cls,\n self_id: int,\n check_expire: bool = True) -> Optional[\"SessionManager\"]:\n if self_id not in cls.sessions:\n return None\n key, time, client = cls.sessions[self_id]\n if check_expire and (datetime.now() - time > cls.session_expiry):\n return None\n return cls(key, client)\n\n\nclass Bot(BaseBot):\n r\"\"\"\n mirai-api-http 协议 Bot 适配。\n\n \\:\\:\\: warning\n 
API中为了使代码更加整洁, 我们采用了与PEP8相符的命名规则取代Mirai原有的驼峰命名\n\n 部分字段可能与文档在符号上不一致\n \\:\\:\\:\n\n \"\"\"\n\n @property\n @overrides(BaseBot)\n def type(self) -> str:\n return \"mirai\"\n\n @property\n def alive(self) -> bool:\n assert isinstance(self.request, WebSocket)\n return not self.request.closed\n\n @property\n def api(self) -> SessionManager:\n \"\"\"返回该Bot对象的会话管理实例以提供API主动调用\"\"\"\n api = SessionManager.get(self_id=int(self.self_id))\n assert api is not None, 'SessionManager has not been initialized'\n return api\n\n @classmethod\n @overrides(BaseBot)\n async def check_permission(\n cls, driver: Driver,\n request: HTTPConnection) -> Tuple[Optional[str], HTTPResponse]:\n if isinstance(request, WebSocket):\n return None, HTTPResponse(\n 501, b'Websocket connection is not implemented')\n self_id: Optional[str] = request.headers.get('bot')\n if self_id is None:\n return None, HTTPResponse(400, b'Header `Bot` is required.')\n self_id = str(self_id).strip()\n await SessionManager.new(\n int(self_id),\n host=cls.mirai_config.host, # type: ignore\n port=cls.mirai_config.port, #type: ignore\n auth_key=cls.mirai_config.auth_key) # type: ignore\n return self_id, HTTPResponse(204, b'')\n\n @classmethod\n @overrides(BaseBot)\n def register(cls, driver: Driver, config: \"Config\"):\n cls.mirai_config = MiraiConfig(**config.dict())\n if (cls.mirai_config.auth_key and cls.mirai_config.host and\n cls.mirai_config.port) is None:\n raise ApiNotAvailable('mirai')\n super().register(driver, config)\n\n @overrides(BaseBot)\n async def handle_message(self, message: dict):\n Log.debug(f'received message {message}')\n try:\n await process_event(\n bot=self,\n event=Event.new({\n **message,\n 'self_id': self.self_id,\n }),\n )\n except Exception as e:\n Log.error(f'Failed to handle message: {message}', e)\n\n @overrides(BaseBot)\n async def _call_api(self, api: str, **data) -> NoReturn:\n raise NotImplementedError\n\n @overrides(BaseBot)\n async def call_api(self, api: str, **data) -> NoReturn:\n r\"\"\"\n \\:\\:\\: danger\n 由于Mirai的HTTP API特殊性, 该API暂时无法实现\n \\:\\:\\:\n\n \\:\\:\\: tip\n 你可以使用 ``MiraiBot.api`` 中提供的调用方法来代替\n \\:\\:\\:\n \"\"\"\n raise NotImplementedError\n\n @overrides(BaseBot)\n def __getattr__(self, key: str) -> NoReturn:\n \"\"\"由于Mirai的HTTP API特殊性, 该API暂时无法实现\"\"\"\n raise NotImplementedError\n\n @overrides(BaseBot)\n @argument_validation\n async def send(self,\n event: Event,\n message: Union[MessageChain, MessageSegment, str],\n at_sender: bool = False):\n \"\"\"\n :说明:\n\n 根据 ``event`` 向触发事件的主体发送信息\n\n :参数:\n\n * ``event: Event``: Event对象\n * ``message: Union[MessageChain, MessageSegment, str]``: 要发送的消息\n * ``at_sender: bool``: 是否 @ 事件主体\n \"\"\"\n if not isinstance(message, MessageChain):\n message = MessageChain(message)\n if isinstance(event, FriendMessage):\n return await self.send_friend_message(target=event.sender.id,\n message_chain=message)\n elif isinstance(event, GroupMessage):\n if at_sender:\n message = MessageSegment.at(event.sender.id) + message\n return await self.send_group_message(group=event.sender.group.id,\n message_chain=message)\n elif isinstance(event, TempMessage):\n return await self.send_temp_message(qq=event.sender.id,\n group=event.sender.group.id,\n message_chain=message)\n else:\n raise ValueError(f'Unsupported event type {event!r}.')\n\n @argument_validation\n async def send_friend_message(self, target: int,\n message_chain: MessageChain):\n \"\"\"\n :说明:\n\n 使用此方法向指定好友发送消息\n\n :参数:\n\n * ``target: int``: 发送消息目标好友的 QQ 号\n * ``message_chain: MessageChain``: 
消息链,是一个消息对象构成的数组\n \"\"\"\n return await self.api.post('sendFriendMessage',\n params={\n 'target': target,\n 'messageChain': message_chain.export()\n })\n\n @argument_validation\n async def send_temp_message(self, qq: int, group: int,\n message_chain: MessageChain):\n \"\"\"\n :说明:\n\n 使用此方法向临时会话对象发送消息\n\n :参数:\n\n * ``qq: int``: 临时会话对象 QQ 号\n * ``group: int``: 临时会话群号\n * ``message_chain: MessageChain``: 消息链,是一个消息对象构成的数组\n \"\"\"\n return await self.api.post('sendTempMessage',\n params={\n 'qq': qq,\n 'group': group,\n 'messageChain': message_chain.export()\n })\n\n @argument_validation\n async def send_group_message(self,\n group: int,\n message_chain: MessageChain,\n quote: Optional[int] = None):\n \"\"\"\n :说明:\n\n 使用此方法向指定群发送消息\n\n :参数:\n\n * ``group: int``: 发送消息目标群的群号\n * ``message_chain: MessageChain``: 消息链,是一个消息对象构成的数组\n * ``quote: Optional[int]``: 引用一条消息的 message_id 进行回复\n \"\"\"\n return await self.api.post('sendGroupMessage',\n params={\n 'group': group,\n 'messageChain': message_chain.export(),\n 'quote': quote\n })\n\n @argument_validation\n async def recall(self, target: int):\n \"\"\"\n :说明:\n\n 使用此方法撤回指定消息。对于bot发送的消息,有2分钟时间限制。对于撤回群聊中群员的消息,需要有相应权限\n\n :参数:\n\n * ``target: int``: 需要撤回的消息的message_id\n \"\"\"\n return await self.api.post('recall', params={'target': target})\n\n @argument_validation\n async def send_image_message(self, target: int, qq: int, group: int,\n urls: List[str]) -> List[str]:\n \"\"\"\n :说明:\n\n 使用此方法向指定对象(群或好友)发送图片消息\n 除非需要通过此手段获取image_id,否则不推荐使用该接口\n\n > 当qq和group同时存在时,表示发送临时会话图片,qq为临时会话对象QQ号,group为临时会话发起的群号\n\n :参数:\n\n * ``target: int``: 发送对象的QQ号或群号,可能存在歧义\n * ``qq: int``: 发送对象的QQ号\n * ``group: int``: 发送对象的群号\n * ``urls: List[str]``: 是一个url字符串构成的数组\n\n :返回:\n\n - ``List[str]``: 一个包含图片imageId的数组\n \"\"\"\n return await self.api.post('sendImageMessage',\n params={\n 'target': target,\n 'qq': qq,\n 'group': group,\n 'urls': urls\n })\n\n @argument_validation\n async def upload_image(self, type: str, img: BytesIO):\n \"\"\"\n :说明:\n\n 使用此方法上传图片文件至服务器并返回Image_id\n\n :参数:\n\n * ``type: str``: \"friend\" 或 \"group\" 或 \"temp\"\n * ``img: BytesIO``: 图片的BytesIO对象\n \"\"\"\n return await self.api.upload('uploadImage',\n params={\n 'type': type,\n 'img': img\n })\n\n @argument_validation\n async def upload_voice(self, type: str, voice: BytesIO):\n \"\"\"\n :说明:\n\n 使用此方法上传语音文件至服务器并返回voice_id\n\n :参数:\n\n * ``type: str``: 当前仅支持 \"group\"\n * ``voice: BytesIO``: 语音的BytesIO对象\n \"\"\"\n return await self.api.upload('uploadVoice',\n params={\n 'type': type,\n 'voice': voice\n })\n\n @argument_validation\n async def fetch_message(self, count: int = 10):\n \"\"\"\n :说明:\n\n 使用此方法获取bot接收到的最老消息和最老各类事件\n (会从MiraiApiHttp消息记录中删除)\n\n :参数:\n\n * ``count: int``: 获取消息和事件的数量\n \"\"\"\n return await self.api.request('fetchMessage', params={'count': count})\n\n @argument_validation\n async def fetch_latest_message(self, count: int = 10):\n \"\"\"\n :说明:\n\n 使用此方法获取bot接收到的最新消息和最新各类事件\n (会从MiraiApiHttp消息记录中删除)\n\n :参数:\n\n * ``count: int``: 获取消息和事件的数量\n \"\"\"\n return await self.api.request('fetchLatestMessage',\n params={'count': count})\n\n @argument_validation\n async def peek_message(self, count: int = 10):\n \"\"\"\n :说明:\n\n 使用此方法获取bot接收到的最老消息和最老各类事件\n (不会从MiraiApiHttp消息记录中删除)\n\n :参数:\n\n * ``count: int``: 获取消息和事件的数量\n \"\"\"\n return await self.api.request('peekMessage', params={'count': count})\n\n @argument_validation\n async def peek_latest_message(self, count: int = 10):\n \"\"\"\n :说明:\n\n 使用此方法获取bot接收到的最新消息和最新各类事件\n (不会从MiraiApiHttp消息记录中删除)\n\n :参数:\n\n * ``count: 
int``: 获取消息和事件的数量\n \"\"\"\n return await self.api.request('peekLatestMessage',\n params={'count': count})\n\n @argument_validation\n async def messsage_from_id(self, id: int):\n \"\"\"\n :说明:\n\n 通过messageId获取一条被缓存的消息\n 使用此方法获取bot接收到的消息和各类事件\n\n :参数:\n\n * ``id: int``: 获取消息的message_id\n \"\"\"\n return await self.api.request('messageFromId', params={'id': id})\n\n @argument_validation\n async def count_message(self):\n \"\"\"\n :说明:\n\n 使用此方法获取bot接收并缓存的消息总数,注意不包含被删除的\n \"\"\"\n return await self.api.request('countMessage')\n\n @argument_validation\n async def friend_list(self) -> List[Dict[str, Any]]:\n \"\"\"\n :说明:\n\n 使用此方法获取bot的好友列表\n\n :返回:\n\n - ``List[Dict[str, Any]]``: 返回的好友列表数据\n \"\"\"\n return await self.api.request('friendList')\n\n @argument_validation\n async def group_list(self) -> List[Dict[str, Any]]:\n \"\"\"\n :说明:\n\n 使用此方法获取bot的群列表\n\n :返回:\n\n - ``List[Dict[str, Any]]``: 返回的群列表数据\n \"\"\"\n return await self.api.request('groupList')\n\n @argument_validation\n async def member_list(self, target: int) -> List[Dict[str, Any]]:\n \"\"\"\n :说明:\n\n 使用此方法获取bot指定群种的成员列表\n\n :参数:\n\n * ``target: int``: 指定群的群号\n\n :返回:\n\n - ``List[Dict[str, Any]]``: 返回的群成员列表数据\n \"\"\"\n return await self.api.request('memberList', params={'target': target})\n\n @argument_validation\n async def mute(self, target: int, member_id: int, time: int):\n \"\"\"\n :说明:\n\n 使用此方法指定群禁言指定群员(需要有相关权限)\n\n :参数:\n\n * ``target: int``: 指定群的群号\n * ``member_id: int``: 指定群员QQ号\n * ``time: int``: 禁言时长,单位为秒,最多30天\n \"\"\"\n return await self.api.post('mute',\n params={\n 'target': target,\n 'memberId': member_id,\n 'time': time\n })\n\n @argument_validation\n async def unmute(self, target: int, member_id: int):\n \"\"\"\n :说明:\n\n 使用此方法指定群解除群成员禁言(需要有相关权限)\n\n :参数:\n\n * ``target: int``: 指定群的群号\n * ``member_id: int``: 指定群员QQ号\n \"\"\"\n return await self.api.post('unmute',\n params={\n 'target': target,\n 'memberId': member_id\n })\n\n @argument_validation\n async def kick(self, target: int, member_id: int, msg: str):\n \"\"\"\n :说明:\n\n 使用此方法移除指定群成员(需要有相关权限)\n\n :参数:\n\n * ``target: int``: 指定群的群号\n * ``member_id: int``: 指定群员QQ号\n * ``msg: str``: 信息\n \"\"\"\n return await self.api.post('kick',\n params={\n 'target': target,\n 'memberId': member_id,\n 'msg': msg\n })\n\n @argument_validation\n async def quit(self, target: int):\n \"\"\"\n :说明:\n\n 使用此方法使Bot退出群聊\n\n :参数:\n\n * ``target: int``: 退出的群号\n \"\"\"\n return await self.api.post('quit', params={'target': target})\n\n @argument_validation\n async def mute_all(self, target: int):\n \"\"\"\n :说明:\n\n 使用此方法令指定群进行全体禁言(需要有相关权限)\n\n :参数:\n\n * ``target: int``: 指定群的群号\n \"\"\"\n return await self.api.post('muteAll', params={'target': target})\n\n @argument_validation\n async def unmute_all(self, target: int):\n \"\"\"\n :说明:\n\n 使用此方法令指定群解除全体禁言(需要有相关权限)\n\n :参数:\n\n * ``target: int``: 指定群的群号\n \"\"\"\n return await self.api.post('unmuteAll', params={'target': target})\n\n @argument_validation\n async def group_config(self, target: int):\n \"\"\"\n :说明:\n\n 使用此方法获取群设置\n\n :参数:\n\n * ``target: int``: 指定群的群号\n\n :返回:\n\n .. 
code-block:: json\n\n {\n \"name\": \"群名称\",\n \"announcement\": \"群公告\",\n \"confessTalk\": true,\n \"allowMemberInvite\": true,\n \"autoApprove\": true,\n \"anonymousChat\": true\n }\n \"\"\"\n return await self.api.request('groupConfig', params={'target': target})\n\n @argument_validation\n async def modify_group_config(self, target: int, config: Dict[str, Any]):\n \"\"\"\n :说明:\n\n 使用此方法修改群设置(需要有相关权限)\n\n :参数:\n\n * ``target: int``: 指定群的群号\n * ``config: Dict[str, Any]``: 群设置, 格式见 ``group_config`` 的返回值\n \"\"\"\n return await self.api.post('groupConfig',\n params={\n 'target': target,\n 'config': config\n })\n\n @argument_validation\n async def member_info(self, target: int, member_id: int):\n \"\"\"\n :说明:\n\n 使用此方法获取群员资料\n\n :参数:\n\n * ``target: int``: 指定群的群号\n * ``member_id: int``: 群员QQ号\n\n :返回:\n\n .. code-block:: json\n\n {\n \"name\": \"群名片\",\n \"specialTitle\": \"群头衔\"\n }\n \"\"\"\n return await self.api.request('memberInfo',\n params={\n 'target': target,\n 'memberId': member_id\n })\n\n @argument_validation\n async def modify_member_info(self, target: int, member_id: int,\n info: Dict[str, Any]):\n \"\"\"\n :说明:\n\n 使用此方法修改群员资料(需要有相关权限)\n\n :参数:\n\n * ``target: int``: 指定群的群号\n * ``member_id: int``: 群员QQ号\n * ``info: Dict[str, Any]``: 群员资料, 格式见 ``member_info`` 的返回值\n \"\"\"\n return await self.api.post('memberInfo',\n params={\n 'target': target,\n 'memberId': member_id,\n 'info': info\n })\n",
"path": "packages/nonebot-adapter-mirai/nonebot/adapters/mirai/bot.py"
}
] | [
{
"content": "from datetime import datetime, timedelta\nfrom io import BytesIO\nfrom ipaddress import IPv4Address\nfrom typing import Any, Dict, List, NoReturn, Optional, Tuple, Union\n\nimport httpx\n\nfrom nonebot.adapters import Bot as BaseBot\nfrom nonebot.config import Config\nfrom nonebot.drivers import Driver, WebSocket\nfrom nonebot.exception import ApiNotAvailable, RequestDenied\nfrom nonebot.typing import overrides\n\nfrom .config import Config as MiraiConfig\nfrom .event import Event, FriendMessage, GroupMessage, TempMessage\nfrom .message import MessageChain, MessageSegment\nfrom .utils import Log, argument_validation, catch_network_error, process_event\n\n\nclass SessionManager:\n \"\"\"Bot会话管理器, 提供API主动调用接口\"\"\"\n sessions: Dict[int, Tuple[str, datetime, httpx.AsyncClient]] = {}\n session_expiry: timedelta = timedelta(minutes=15)\n\n def __init__(self, session_key: str, client: httpx.AsyncClient):\n self.session_key, self.client = session_key, client\n\n @catch_network_error\n async def post(self,\n path: str,\n *,\n params: Optional[Dict[str, Any]] = None) -> Any:\n \"\"\"\n :说明:\n\n 以POST方式主动提交API请求\n\n :参数:\n\n * ``path: str``: 对应API路径\n * ``params: Optional[Dict[str, Any]]``: 请求参数 (无需sessionKey)\n\n :返回:\n\n - ``Dict[str, Any]``: API 返回值\n \"\"\"\n response = await self.client.post(\n path,\n json={\n **(params or {}),\n 'sessionKey': self.session_key,\n },\n timeout=3,\n )\n response.raise_for_status()\n return response.json()\n\n @catch_network_error\n async def request(self,\n path: str,\n *,\n params: Optional[Dict[str, Any]] = None) -> Any:\n \"\"\"\n :说明:\n\n 以GET方式主动提交API请求\n\n :参数:\n\n * ``path: str``: 对应API路径\n * ``params: Optional[Dict[str, Any]]``: 请求参数 (无需sessionKey)\n \"\"\"\n response = await self.client.get(\n path,\n params={\n **(params or {}),\n 'sessionKey': self.session_key,\n },\n timeout=3,\n )\n response.raise_for_status()\n return response.json()\n\n @catch_network_error\n async def upload(self, path: str, *, params: Dict[str, Any]) -> Any:\n \"\"\"\n :说明:\n\n 以表单(``multipart/form-data``)形式主动提交API请求\n\n :参数:\n\n * ``path: str``: 对应API路径\n * ``params: Dict[str, Any]``: 请求参数 (无需sessionKey)\n \"\"\"\n files = {k: v for k, v in params.items() if isinstance(v, BytesIO)}\n form = {k: v for k, v in params.items() if k not in files}\n form['sessionKey'] = self.session_key\n response = await self.client.post(\n path,\n data=form,\n files=files,\n timeout=6,\n )\n response.raise_for_status()\n return response.json()\n\n @classmethod\n async def new(cls, self_id: int, *, host: IPv4Address, port: int,\n auth_key: str) -> \"SessionManager\":\n session = cls.get(self_id)\n if session is not None:\n return session\n\n client = httpx.AsyncClient(base_url=f'http://{host}:{port}')\n response = await client.post('/auth', json={'authKey': auth_key})\n response.raise_for_status()\n auth = response.json()\n assert auth['code'] == 0\n session_key = auth['session']\n response = await client.post('/verify',\n json={\n 'sessionKey': session_key,\n 'qq': self_id\n })\n assert response.json()['code'] == 0\n cls.sessions[self_id] = session_key, datetime.now(), client\n\n return cls(session_key, client)\n\n @classmethod\n def get(cls,\n self_id: int,\n check_expire: bool = True) -> Optional[\"SessionManager\"]:\n if self_id not in cls.sessions:\n return None\n key, time, client = cls.sessions[self_id]\n if check_expire and (datetime.now() - time > cls.session_expiry):\n return None\n return cls(key, client)\n\n\nclass Bot(BaseBot):\n \"\"\"\n mirai-api-http 协议 Bot 适配。\n\n 
\\:\\:\\: warning\n API中为了使代码更加整洁, 我们采用了与PEP8相符的命名规则取代Mirai原有的驼峰命名\n\n 部分字段可能与文档在符号上不一致\n \\:\\:\\:\n\n \"\"\"\n\n @overrides(BaseBot)\n def __init__(self,\n connection_type: str,\n self_id: str,\n *,\n websocket: Optional[WebSocket] = None):\n super().__init__(connection_type, self_id, websocket=websocket)\n\n @property\n @overrides(BaseBot)\n def type(self) -> str:\n return \"mirai\"\n\n @property\n def alive(self) -> bool:\n return not self.websocket.closed\n\n @property\n def api(self) -> SessionManager:\n \"\"\"返回该Bot对象的会话管理实例以提供API主动调用\"\"\"\n api = SessionManager.get(self_id=int(self.self_id))\n assert api is not None, 'SessionManager has not been initialized'\n return api\n\n @classmethod\n @overrides(BaseBot)\n async def check_permission(cls, driver: \"Driver\", connection_type: str,\n headers: dict, body: Optional[bytes]) -> str:\n if connection_type == 'ws':\n raise RequestDenied(\n status_code=501,\n reason='Websocket connection is not implemented')\n self_id: Optional[str] = headers.get('bot')\n if self_id is None:\n raise RequestDenied(status_code=400,\n reason='Header `Bot` is required.')\n self_id = str(self_id).strip()\n await SessionManager.new(\n int(self_id),\n host=cls.mirai_config.host, # type: ignore\n port=cls.mirai_config.port, #type: ignore\n auth_key=cls.mirai_config.auth_key) # type: ignore\n return self_id\n\n @classmethod\n @overrides(BaseBot)\n def register(cls, driver: \"Driver\", config: \"Config\"):\n cls.mirai_config = MiraiConfig(**config.dict())\n if (cls.mirai_config.auth_key and cls.mirai_config.host and\n cls.mirai_config.port) is None:\n raise ApiNotAvailable('mirai')\n super().register(driver, config)\n\n @overrides(BaseBot)\n async def handle_message(self, message: dict):\n Log.debug(f'received message {message}')\n try:\n await process_event(\n bot=self,\n event=Event.new({\n **message,\n 'self_id': self.self_id,\n }),\n )\n except Exception as e:\n Log.error(f'Failed to handle message: {message}', e)\n\n @overrides(BaseBot)\n async def _call_api(self, api: str, **data) -> NoReturn:\n raise NotImplementedError\n\n @overrides(BaseBot)\n async def call_api(self, api: str, **data) -> NoReturn:\n \"\"\"\n \\:\\:\\: danger\n 由于Mirai的HTTP API特殊性, 该API暂时无法实现\n \\:\\:\\:\n\n \\:\\:\\: tip\n 你可以使用 ``MiraiBot.api`` 中提供的调用方法来代替\n \\:\\:\\:\n \"\"\"\n raise NotImplementedError\n\n @overrides(BaseBot)\n def __getattr__(self, key: str) -> NoReturn:\n \"\"\"由于Mirai的HTTP API特殊性, 该API暂时无法实现\"\"\"\n raise NotImplementedError\n\n @overrides(BaseBot)\n @argument_validation\n async def send(self,\n event: Event,\n message: Union[MessageChain, MessageSegment, str],\n at_sender: bool = False):\n \"\"\"\n :说明:\n\n 根据 ``event`` 向触发事件的主体发送信息\n\n :参数:\n\n * ``event: Event``: Event对象\n * ``message: Union[MessageChain, MessageSegment, str]``: 要发送的消息\n * ``at_sender: bool``: 是否 @ 事件主体\n \"\"\"\n if not isinstance(message, MessageChain):\n message = MessageChain(message)\n if isinstance(event, FriendMessage):\n return await self.send_friend_message(target=event.sender.id,\n message_chain=message)\n elif isinstance(event, GroupMessage):\n if at_sender:\n message = MessageSegment.at(event.sender.id) + message\n return await self.send_group_message(group=event.sender.group.id,\n message_chain=message)\n elif isinstance(event, TempMessage):\n return await self.send_temp_message(qq=event.sender.id,\n group=event.sender.group.id,\n message_chain=message)\n else:\n raise ValueError(f'Unsupported event type {event!r}.')\n\n @argument_validation\n async def send_friend_message(self, 
target: int,\n message_chain: MessageChain):\n \"\"\"\n :说明:\n\n 使用此方法向指定好友发送消息\n\n :参数:\n\n * ``target: int``: 发送消息目标好友的 QQ 号\n * ``message_chain: MessageChain``: 消息链,是一个消息对象构成的数组\n \"\"\"\n return await self.api.post('sendFriendMessage',\n params={\n 'target': target,\n 'messageChain': message_chain.export()\n })\n\n @argument_validation\n async def send_temp_message(self, qq: int, group: int,\n message_chain: MessageChain):\n \"\"\"\n :说明:\n\n 使用此方法向临时会话对象发送消息\n\n :参数:\n\n * ``qq: int``: 临时会话对象 QQ 号\n * ``group: int``: 临时会话群号\n * ``message_chain: MessageChain``: 消息链,是一个消息对象构成的数组\n \"\"\"\n return await self.api.post('sendTempMessage',\n params={\n 'qq': qq,\n 'group': group,\n 'messageChain': message_chain.export()\n })\n\n @argument_validation\n async def send_group_message(self,\n group: int,\n message_chain: MessageChain,\n quote: Optional[int] = None):\n \"\"\"\n :说明:\n\n 使用此方法向指定群发送消息\n\n :参数:\n\n * ``group: int``: 发送消息目标群的群号\n * ``message_chain: MessageChain``: 消息链,是一个消息对象构成的数组\n * ``quote: Optional[int]``: 引用一条消息的 message_id 进行回复\n \"\"\"\n return await self.api.post('sendGroupMessage',\n params={\n 'group': group,\n 'messageChain': message_chain.export(),\n 'quote': quote\n })\n\n @argument_validation\n async def recall(self, target: int):\n \"\"\"\n :说明:\n\n 使用此方法撤回指定消息。对于bot发送的消息,有2分钟时间限制。对于撤回群聊中群员的消息,需要有相应权限\n\n :参数:\n\n * ``target: int``: 需要撤回的消息的message_id\n \"\"\"\n return await self.api.post('recall', params={'target': target})\n\n @argument_validation\n async def send_image_message(self, target: int, qq: int, group: int,\n urls: List[str]) -> List[str]:\n \"\"\"\n :说明:\n\n 使用此方法向指定对象(群或好友)发送图片消息\n 除非需要通过此手段获取image_id,否则不推荐使用该接口\n\n > 当qq和group同时存在时,表示发送临时会话图片,qq为临时会话对象QQ号,group为临时会话发起的群号\n\n :参数:\n\n * ``target: int``: 发送对象的QQ号或群号,可能存在歧义\n * ``qq: int``: 发送对象的QQ号\n * ``group: int``: 发送对象的群号\n * ``urls: List[str]``: 是一个url字符串构成的数组\n\n :返回:\n\n - ``List[str]``: 一个包含图片imageId的数组\n \"\"\"\n return await self.api.post('sendImageMessage',\n params={\n 'target': target,\n 'qq': qq,\n 'group': group,\n 'urls': urls\n })\n\n @argument_validation\n async def upload_image(self, type: str, img: BytesIO):\n \"\"\"\n :说明:\n\n 使用此方法上传图片文件至服务器并返回Image_id\n\n :参数:\n\n * ``type: str``: \"friend\" 或 \"group\" 或 \"temp\"\n * ``img: BytesIO``: 图片的BytesIO对象\n \"\"\"\n return await self.api.upload('uploadImage',\n params={\n 'type': type,\n 'img': img\n })\n\n @argument_validation\n async def upload_voice(self, type: str, voice: BytesIO):\n \"\"\"\n :说明:\n\n 使用此方法上传语音文件至服务器并返回voice_id\n\n :参数:\n\n * ``type: str``: 当前仅支持 \"group\"\n * ``voice: BytesIO``: 语音的BytesIO对象\n \"\"\"\n return await self.api.upload('uploadVoice',\n params={\n 'type': type,\n 'voice': voice\n })\n\n @argument_validation\n async def fetch_message(self, count: int = 10):\n \"\"\"\n :说明:\n\n 使用此方法获取bot接收到的最老消息和最老各类事件\n (会从MiraiApiHttp消息记录中删除)\n\n :参数:\n\n * ``count: int``: 获取消息和事件的数量\n \"\"\"\n return await self.api.request('fetchMessage', params={'count': count})\n\n @argument_validation\n async def fetch_latest_message(self, count: int = 10):\n \"\"\"\n :说明:\n\n 使用此方法获取bot接收到的最新消息和最新各类事件\n (会从MiraiApiHttp消息记录中删除)\n\n :参数:\n\n * ``count: int``: 获取消息和事件的数量\n \"\"\"\n return await self.api.request('fetchLatestMessage',\n params={'count': count})\n\n @argument_validation\n async def peek_message(self, count: int = 10):\n \"\"\"\n :说明:\n\n 使用此方法获取bot接收到的最老消息和最老各类事件\n (不会从MiraiApiHttp消息记录中删除)\n\n :参数:\n\n * ``count: int``: 获取消息和事件的数量\n \"\"\"\n return await self.api.request('peekMessage', params={'count': count})\n\n 
@argument_validation\n async def peek_latest_message(self, count: int = 10):\n \"\"\"\n :说明:\n\n 使用此方法获取bot接收到的最新消息和最新各类事件\n (不会从MiraiApiHttp消息记录中删除)\n\n :参数:\n\n * ``count: int``: 获取消息和事件的数量\n \"\"\"\n return await self.api.request('peekLatestMessage',\n params={'count': count})\n\n @argument_validation\n async def messsage_from_id(self, id: int):\n \"\"\"\n :说明:\n\n 通过messageId获取一条被缓存的消息\n 使用此方法获取bot接收到的消息和各类事件\n\n :参数:\n\n * ``id: int``: 获取消息的message_id\n \"\"\"\n return await self.api.request('messageFromId', params={'id': id})\n\n @argument_validation\n async def count_message(self):\n \"\"\"\n :说明:\n\n 使用此方法获取bot接收并缓存的消息总数,注意不包含被删除的\n \"\"\"\n return await self.api.request('countMessage')\n\n @argument_validation\n async def friend_list(self) -> List[Dict[str, Any]]:\n \"\"\"\n :说明:\n\n 使用此方法获取bot的好友列表\n\n :返回:\n\n - ``List[Dict[str, Any]]``: 返回的好友列表数据\n \"\"\"\n return await self.api.request('friendList')\n\n @argument_validation\n async def group_list(self) -> List[Dict[str, Any]]:\n \"\"\"\n :说明:\n\n 使用此方法获取bot的群列表\n\n :返回:\n\n - ``List[Dict[str, Any]]``: 返回的群列表数据\n \"\"\"\n return await self.api.request('groupList')\n\n @argument_validation\n async def member_list(self, target: int) -> List[Dict[str, Any]]:\n \"\"\"\n :说明:\n\n 使用此方法获取bot指定群种的成员列表\n\n :参数:\n\n * ``target: int``: 指定群的群号\n\n :返回:\n\n - ``List[Dict[str, Any]]``: 返回的群成员列表数据\n \"\"\"\n return await self.api.request('memberList', params={'target': target})\n\n @argument_validation\n async def mute(self, target: int, member_id: int, time: int):\n \"\"\"\n :说明:\n\n 使用此方法指定群禁言指定群员(需要有相关权限)\n\n :参数:\n\n * ``target: int``: 指定群的群号\n * ``member_id: int``: 指定群员QQ号\n * ``time: int``: 禁言时长,单位为秒,最多30天\n \"\"\"\n return await self.api.post('mute',\n params={\n 'target': target,\n 'memberId': member_id,\n 'time': time\n })\n\n @argument_validation\n async def unmute(self, target: int, member_id: int):\n \"\"\"\n :说明:\n\n 使用此方法指定群解除群成员禁言(需要有相关权限)\n\n :参数:\n\n * ``target: int``: 指定群的群号\n * ``member_id: int``: 指定群员QQ号\n \"\"\"\n return await self.api.post('unmute',\n params={\n 'target': target,\n 'memberId': member_id\n })\n\n @argument_validation\n async def kick(self, target: int, member_id: int, msg: str):\n \"\"\"\n :说明:\n\n 使用此方法移除指定群成员(需要有相关权限)\n\n :参数:\n\n * ``target: int``: 指定群的群号\n * ``member_id: int``: 指定群员QQ号\n * ``msg: str``: 信息\n \"\"\"\n return await self.api.post('kick',\n params={\n 'target': target,\n 'memberId': member_id,\n 'msg': msg\n })\n\n @argument_validation\n async def quit(self, target: int):\n \"\"\"\n :说明:\n\n 使用此方法使Bot退出群聊\n\n :参数:\n\n * ``target: int``: 退出的群号\n \"\"\"\n return await self.api.post('quit', params={'target': target})\n\n @argument_validation\n async def mute_all(self, target: int):\n \"\"\"\n :说明:\n\n 使用此方法令指定群进行全体禁言(需要有相关权限)\n\n :参数:\n\n * ``target: int``: 指定群的群号\n \"\"\"\n return await self.api.post('muteAll', params={'target': target})\n\n @argument_validation\n async def unmute_all(self, target: int):\n \"\"\"\n :说明:\n\n 使用此方法令指定群解除全体禁言(需要有相关权限)\n\n :参数:\n\n * ``target: int``: 指定群的群号\n \"\"\"\n return await self.api.post('unmuteAll', params={'target': target})\n\n @argument_validation\n async def group_config(self, target: int):\n \"\"\"\n :说明:\n\n 使用此方法获取群设置\n\n :参数:\n\n * ``target: int``: 指定群的群号\n\n :返回:\n\n .. 
code-block:: json\n\n {\n \"name\": \"群名称\",\n \"announcement\": \"群公告\",\n \"confessTalk\": true,\n \"allowMemberInvite\": true,\n \"autoApprove\": true,\n \"anonymousChat\": true\n }\n \"\"\"\n return await self.api.request('groupConfig', params={'target': target})\n\n @argument_validation\n async def modify_group_config(self, target: int, config: Dict[str, Any]):\n \"\"\"\n :说明:\n\n 使用此方法修改群设置(需要有相关权限)\n\n :参数:\n\n * ``target: int``: 指定群的群号\n * ``config: Dict[str, Any]``: 群设置, 格式见 ``group_config`` 的返回值\n \"\"\"\n return await self.api.post('groupConfig',\n params={\n 'target': target,\n 'config': config\n })\n\n @argument_validation\n async def member_info(self, target: int, member_id: int):\n \"\"\"\n :说明:\n\n 使用此方法获取群员资料\n\n :参数:\n\n * ``target: int``: 指定群的群号\n * ``member_id: int``: 群员QQ号\n\n :返回:\n\n .. code-block:: json\n\n {\n \"name\": \"群名片\",\n \"specialTitle\": \"群头衔\"\n }\n \"\"\"\n return await self.api.request('memberInfo',\n params={\n 'target': target,\n 'memberId': member_id\n })\n\n @argument_validation\n async def modify_member_info(self, target: int, member_id: int,\n info: Dict[str, Any]):\n \"\"\"\n :说明:\n\n 使用此方法修改群员资料(需要有相关权限)\n\n :参数:\n\n * ``target: int``: 指定群的群号\n * ``member_id: int``: 群员QQ号\n * ``info: Dict[str, Any]``: 群员资料, 格式见 ``member_info`` 的返回值\n \"\"\"\n return await self.api.post('memberInfo',\n params={\n 'target': target,\n 'memberId': member_id,\n 'info': info\n })\n",
"path": "packages/nonebot-adapter-mirai/nonebot/adapters/mirai/bot.py"
}
] | diff --git a/docs/.vuepress/public/plugins.json b/docs/.vuepress/public/plugins.json
index 66603ac8d2d7..035737964633 100644
--- a/docs/.vuepress/public/plugins.json
+++ b/docs/.vuepress/public/plugins.json
@@ -278,5 +278,29 @@
"desc": "用于让机器人撤回自己发出的消息",
"author": "MeetWq",
"repo": "https://github.com/MeetWq/nonebot-plugin-withdraw"
+ },
+ {
+ "id": "nonebot_plugin_pixivrank_search",
+ "link": "nonebot-plugin-pixivrank-search",
+ "name": "nonebot_plugin_pixivrank_search",
+ "desc": "基于RSSHUB阅读器的P站排行和P站搜图",
+ "author": "HibiKier",
+ "repo": "https://github.com/HibiKier/nonebot_plugin_pixivrank_search"
+ },
+ {
+ "id": "nonebot_plugin_russian",
+ "link": "nonebot-plugin-russian",
+ "name": "nonebot_plugin_russian",
+ "desc": "群内小游戏,使用金币赌注的俄罗斯轮盘",
+ "author": "HibiKier",
+ "repo": "https://github.com/HibiKier/nonebot_plugin_russian"
+ },
+ {
+ "id": "nonebot_plugin_statistical",
+ "link": "nonebot-plugin-statistical",
+ "name": "nonebot_plugin_statistical",
+ "desc": "一个简单的功能调用统计以及可视化插件",
+ "author": "HibiKier",
+ "repo": "https://github.com/HibiKier/nonebot_plugin_statistical"
}
-]
+]
\ No newline at end of file
diff --git a/packages/nonebot-adapter-mirai/nonebot/adapters/mirai/bot.py b/packages/nonebot-adapter-mirai/nonebot/adapters/mirai/bot.py
index ebce2d74bea8..a967025c1bfa 100644
--- a/packages/nonebot-adapter-mirai/nonebot/adapters/mirai/bot.py
+++ b/packages/nonebot-adapter-mirai/nonebot/adapters/mirai/bot.py
@@ -95,6 +95,7 @@ async def upload(self, path: str, *, params: Dict[str, Any]) -> Any:
"""
files = {k: v for k, v in params.items() if isinstance(v, BytesIO)}
form = {k: v for k, v in params.items() if k not in files}
+ form['sessionKey'] = self.session_key
response = await self.client.post(
path,
data=form,
| Bug: mirai adapter raises an error when upload_image is called to upload an image
**Describe the bug:**
Calling upload_image on the mirai adapter to upload an image fails with:
```powershell
httpx.HTTPStatusError: 400 Client Error: Bad Request for url: http://127.0.0.1:8000/uploadImage
For more information check: https://httpstatuses.com/400
```
**How to reproduce?**
```python
with open('file.png', 'rb') as f:
img = BytesIO(f.read())
img_id = await bot.upload_image('group', img)
```
**Expected result**
```
{'imageId': '{******-****-FD90-491D-141D77303EE5}.png', 'url': 'http://gchat.qpic.cn/gchatpic_new/*********', 'path': ''}
```
**Environment info:**
- OS: Windows10
- Python Version: 3.8.2
- Nonebot Version: 2.0.0a13.post1
- Mirai Version: 2.7-M1
- mirai-api-http Version: 1.12.0
**Screenshot**

**Cause**
The uploadImage API of mah v1.8.4 requires a sessionKey to be provided.
Tested against mah 1.12.0: after adding the sessionKey, the expected result above is returned.
I'm new to this and my code isn't great, so I won't open a PR.
> ### [Image file upload](https://github.com/zxsean/mirai-api-http/blob/master/docs/API.md#%E5%9B%BE%E7%89%87%E6%96%87%E4%BB%B6%E4%B8%8A%E4%BC%A0)
>
> ```
> [POST] /uploadImage
> ```
>
> Use this method to upload an image file to the server; it returns an ImageId
>
> #### Request
>
> Content-Type: multipart/form-data
>
> | Name | Type | Optional | Example | Description |
> | ---------- | ------ | -------- | ----------- | ----------------------------- |
> | sessionKey | String | false | YourSession | An already-activated session |
> | type | String | false | "friend" | "friend", "group" or "temp" |
> | img | File | false | - | Image file |
In the http adapter of mah v2.0 it is no longer required:
> ### [Image file upload](https://github.com/project-mirai/mirai-api-http/blob/master/docs/adapter/HttpAdapter.md#%E5%9B%BE%E7%89%87%E6%96%87%E4%BB%B6%E4%B8%8A%E4%BC%A0)
>
> Use this method to upload an image file to the server; it returns an ImageId
>
> ```
> [POST] /uploadImage
> ```
>
> **This endpoint is a [POST] request; the parameters are sent as `multipart/form-data`**
>
> #### Request:
>
> | Name | Type | Optional | Example | Description |
> | ---------- | ------ | -------- | ----------- | ----------------------------- |
> | sessionKey | String | true | YourSession | An already-activated session |
> | type | String | false | "friend" | "friend", "group" or "temp" |
> | img | File | false | - | Image file |
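
A minimal standalone sketch of the workaround the diff applies (sending the session key as a form field alongside the file part), written against plain `httpx`; the base URL matches the error message above and the session key is a placeholder, adjust both as needed:

```python
from io import BytesIO

import httpx


async def upload_group_image(session_key: str, image: BytesIO) -> dict:
    """Upload an image to a mah v1.x /uploadImage endpoint.

    The sessionKey is sent as an ordinary form field next to the file part,
    which is what the one-line adapter fix in the diff above does.
    """
    async with httpx.AsyncClient(base_url="http://127.0.0.1:8000") as client:
        response = await client.post(
            "/uploadImage",
            data={"sessionKey": session_key, "type": "group"},  # form fields
            files={"img": image},                               # file part
        )
        response.raise_for_status()
        # Expected shape: {"imageId": "...", "url": "...", "path": ""}
        return response.json()
```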
|
conda__conda-5232 | [
{
"content": "# (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom argparse import SUPPRESS\nimport collections\nimport json\nimport os\nfrom os.path import join\nimport sys\nfrom textwrap import wrap\n\nfrom .common import Completer, add_parser_json, stdout_json_success\nfrom .. import CondaError\nfrom .._vendor.auxlib.compat import isiterable\nfrom .._vendor.auxlib.entity import EntityEncoder\nfrom ..base.constants import CONDA_HOMEPAGE_URL\nfrom ..base.context import context\nfrom ..common.compat import iteritems, string_types, text_type\nfrom ..common.configuration import pretty_list, pretty_map\nfrom ..common.constants import NULL\nfrom ..common.yaml import yaml_dump, yaml_load\nfrom ..config import (rc_bool_keys, rc_list_keys, rc_other, rc_string_keys, sys_rc_path,\n user_rc_path)\nfrom ..exceptions import CondaKeyError, CondaValueError, CouldntParseError\n\ndescr = \"\"\"\nModify configuration values in .condarc. This is modeled after the git\nconfig command. Writes to the user .condarc file (%s) by default.\n\n\"\"\" % user_rc_path\n\n# Note, the extra whitespace in the list keys is on purpose. It's so the\n# formatting from help2man is still valid YAML (otherwise it line wraps the\n# keys like \"- conda - defaults\"). Technically the parser here still won't\n# recognize it because it removes the indentation, but at least it will be\n# valid.\nadditional_descr = \"\"\"\nSee `conda config --describe` or %s/docs/config.html\nfor details on all the options that can go in .condarc.\n\nExamples:\n\nDisplay all configuration values as calculated and compiled:\n\n conda config --show\n\nDisplay all identified configuration sources:\n\n conda config --show-sources\n\nDescribe all available configuration options:\n\n conda config --describe\n\nAdd the conda-canary channel:\n\n conda config --add channels conda-canary\n\nSet the output verbosity to level 3 (highest):\n\n conda config --set verbosity 3\n\"\"\" % CONDA_HOMEPAGE_URL\n\n\nclass SingleValueKey(Completer):\n def _get_items(self):\n return rc_bool_keys + \\\n rc_string_keys + \\\n ['yes', 'no', 'on', 'off', 'true', 'false']\n\n\nclass ListKey(Completer):\n def _get_items(self):\n return rc_list_keys\n\n\nclass BoolOrListKey(Completer):\n def __contains__(self, other):\n return other in self.get_items()\n\n def _get_items(self):\n return rc_list_keys + rc_bool_keys\n\n\ndef configure_parser(sub_parsers):\n p = sub_parsers.add_parser(\n 'config',\n description=descr,\n help=descr,\n epilog=additional_descr,\n )\n add_parser_json(p)\n\n # TODO: use argparse.FileType\n location = p.add_mutually_exclusive_group()\n location.add_argument(\n \"--system\",\n action=\"store_true\",\n help=\"\"\"Write to the system .condarc file ({system}). Otherwise writes to the user\n config file ({user}).\"\"\".format(system=sys_rc_path,\n user=user_rc_path),\n )\n location.add_argument(\n \"--env\",\n action=\"store_true\",\n help=\"Write to the active conda environment .condarc file (%s). \"\n \"If no environment is active, write to the user config file (%s).\"\n \"\" % (os.getenv('CONDA_PREFIX', \"<no active environment>\"), user_rc_path),\n )\n location.add_argument(\n \"--file\",\n action=\"store\",\n help=\"\"\"Write to the given file. 
Otherwise writes to the user config file ({user})\nor the file path given by the 'CONDARC' environment variable, if it is set\n(default: %(default)s).\"\"\".format(user=user_rc_path),\n default=os.environ.get('CONDARC', user_rc_path)\n )\n\n # XXX: Does this really have to be mutually exclusive. I think the below\n # code will work even if it is a regular group (although combination of\n # --add and --remove with the same keys will not be well-defined).\n action = p.add_mutually_exclusive_group(required=True)\n action.add_argument(\n \"--show\",\n action=\"store_true\",\n help=\"Display all configuration values as calculated and compiled.\",\n )\n action.add_argument(\n \"--show-sources\",\n action=\"store_true\",\n help=\"Display all identified configuration sources.\",\n )\n action.add_argument(\n \"--validate\",\n action=\"store_true\",\n help=\"Validate all configuration sources.\",\n )\n action.add_argument(\n \"--describe\",\n action=\"store_true\",\n help=\"Describe available configuration parameters.\",\n )\n action.add_argument(\n \"--get\",\n nargs='*',\n action=\"store\",\n help=\"Get a configuration value.\",\n default=None,\n metavar='KEY',\n choices=BoolOrListKey()\n )\n action.add_argument(\n \"--append\",\n nargs=2,\n action=\"append\",\n help=\"\"\"Add one configuration value to the end of a list key.\"\"\",\n default=[],\n choices=ListKey(),\n metavar=('KEY', 'VALUE'),\n )\n action.add_argument(\n \"--prepend\", \"--add\",\n nargs=2,\n action=\"append\",\n help=\"\"\"Add one configuration value to the beginning of a list key.\"\"\",\n default=[],\n choices=ListKey(),\n metavar=('KEY', 'VALUE'),\n )\n action.add_argument(\n \"--set\",\n nargs=2,\n action=\"append\",\n help=\"\"\"Set a boolean or string key\"\"\",\n default=[],\n choices=SingleValueKey(),\n metavar=('KEY', 'VALUE'),\n )\n action.add_argument(\n \"--remove\",\n nargs=2,\n action=\"append\",\n help=\"\"\"Remove a configuration value from a list key. This removes\n all instances of the value.\"\"\",\n default=[],\n metavar=('KEY', 'VALUE'),\n )\n action.add_argument(\n \"--remove-key\",\n nargs=1,\n action=\"append\",\n help=\"\"\"Remove a configuration key (and all its values).\"\"\",\n default=[],\n metavar=\"KEY\",\n )\n\n p.add_argument(\n \"-f\", \"--force\",\n action=\"store_true\",\n default=NULL,\n help=SUPPRESS, # TODO: No longer used. 
Remove in a future release.\n )\n\n p.set_defaults(func=execute)\n\n\ndef execute(args, parser):\n try:\n execute_config(args, parser)\n except (CouldntParseError, NotImplementedError) as e:\n raise CondaError(e)\n\n\ndef format_dict(d):\n lines = []\n for k, v in iteritems(d):\n if isinstance(v, collections.Mapping):\n if v:\n lines.append(\"%s:\" % k)\n lines.append(pretty_map(v))\n else:\n lines.append(\"%s: {}\" % k)\n elif isiterable(v):\n if v:\n lines.append(\"%s:\" % k)\n lines.append(pretty_list(v))\n else:\n lines.append(\"%s: []\" % k)\n else:\n lines.append(\"%s: %s\" % (k, v if v is not None else \"None\"))\n return lines\n\n\ndef execute_config(args, parser):\n json_warnings = []\n json_get = {}\n\n if args.show_sources:\n if context.json:\n print(json.dumps(context.collect_all(), sort_keys=True,\n indent=2, separators=(',', ': ')))\n else:\n lines = []\n for source, reprs in iteritems(context.collect_all()):\n lines.append(\"==> %s <==\" % source)\n lines.extend(format_dict(reprs))\n lines.append('')\n print('\\n'.join(lines))\n return\n\n if args.show:\n from collections import OrderedDict\n\n d = OrderedDict((key, getattr(context, key))\n for key in context.list_parameters())\n if context.json:\n print(json.dumps(d, sort_keys=True, indent=2, separators=(',', ': '),\n cls=EntityEncoder))\n else:\n # coerce channels\n d['custom_channels'] = {k: text_type(v).replace(k, '') # TODO: the replace here isn't quite right # NOQA\n for k, v in iteritems(d['custom_channels'])}\n # TODO: custom_multichannels needs better formatting\n d['custom_multichannels'] = {k: json.dumps([text_type(c) for c in chnls])\n for k, chnls in iteritems(d['custom_multichannels'])}\n\n print('\\n'.join(format_dict(d)))\n context.validate_configuration()\n return\n\n if args.describe:\n paramater_names = context.list_parameters()\n if context.json:\n print(json.dumps([context.describe_parameter(name) for name in paramater_names],\n sort_keys=True, indent=2, separators=(',', ': '),\n cls=EntityEncoder))\n else:\n def clean_element_type(element_types):\n _types = set()\n for et in element_types:\n _types.add('str') if isinstance(et, string_types) else _types.add('%s' % et)\n return tuple(sorted(_types))\n\n for name in paramater_names:\n details = context.describe_parameter(name)\n aliases = details['aliases']\n string_delimiter = details.get('string_delimiter')\n element_types = details['element_types']\n if details['parameter_type'] == 'primitive':\n print(\"%s (%s)\" % (name, ', '.join(clean_element_type(element_types))))\n else:\n print(\"%s (%s: %s)\" % (name, details['parameter_type'],\n ', '.join(clean_element_type(element_types))))\n def_str = ' default: %s' % json.dumps(details['default_value'], indent=2,\n separators=(',', ': '),\n cls=EntityEncoder)\n print('\\n '.join(def_str.split('\\n')))\n if aliases:\n print(\" aliases: %s\" % ', '.join(aliases))\n if string_delimiter:\n print(\" string delimiter: '%s'\" % string_delimiter)\n print('\\n '.join(wrap(' ' + details['description'], 70)))\n print()\n return\n\n if args.validate:\n context.validate_all()\n return\n\n if args.system:\n rc_path = sys_rc_path\n elif args.env:\n if 'CONDA_PREFIX' in os.environ:\n rc_path = join(os.environ['CONDA_PREFIX'], '.condarc')\n else:\n rc_path = user_rc_path\n elif args.file:\n rc_path = args.file\n else:\n rc_path = user_rc_path\n\n # read existing condarc\n if os.path.exists(rc_path):\n with open(rc_path, 'r') as fh:\n rc_config = yaml_load(fh) or {}\n else:\n rc_config = {}\n\n # Get\n if args.get is not 
None:\n context.validate_all()\n if args.get == []:\n args.get = sorted(rc_config.keys())\n for key in args.get:\n if key not in rc_list_keys + rc_bool_keys + rc_string_keys:\n if key not in rc_other:\n message = \"unknown key %s\" % key\n if not context.json:\n print(message, file=sys.stderr)\n else:\n json_warnings.append(message)\n continue\n if key not in rc_config:\n continue\n\n if context.json:\n json_get[key] = rc_config[key]\n continue\n\n if isinstance(rc_config[key], (bool, string_types)):\n print(\"--set\", key, rc_config[key])\n else: # assume the key is a list-type\n # Note, since conda config --add prepends, these are printed in\n # the reverse order so that entering them in this order will\n # recreate the same file\n items = rc_config.get(key, [])\n numitems = len(items)\n for q, item in enumerate(reversed(items)):\n # Use repr so that it can be pasted back in to conda config --add\n if key == \"channels\" and q in (0, numitems-1):\n print(\"--add\", key, repr(item),\n \" # lowest priority\" if q == 0 else \" # highest priority\")\n else:\n print(\"--add\", key, repr(item))\n\n # prepend, append, add\n for arg, prepend in zip((args.prepend, args.append), (True, False)):\n sequence_parameters = [p for p in context.list_parameters()\n if context.describe_parameter(p)['parameter_type'] == 'sequence']\n for key, item in arg:\n if key == 'channels' and key not in rc_config:\n rc_config[key] = ['defaults']\n if key not in sequence_parameters:\n raise CondaValueError(\"Key '%s' is not a known sequence parameter.\" % key)\n if not isinstance(rc_config.get(key, []), list):\n bad = rc_config[key].__class__.__name__\n raise CouldntParseError(\"key %r should be a list, not %s.\" % (key, bad))\n if key == 'default_channels' and rc_path != sys_rc_path:\n msg = \"'default_channels' is only configurable for system installs\"\n raise NotImplementedError(msg)\n arglist = rc_config.setdefault(key, [])\n if item in arglist:\n # Right now, all list keys should not contain duplicates\n message = \"Warning: '%s' already in '%s' list, moving to the %s\" % (\n item, key, \"top\" if prepend else \"bottom\")\n arglist = rc_config[key] = [p for p in arglist if p != item]\n if not context.json:\n print(message, file=sys.stderr)\n else:\n json_warnings.append(message)\n arglist.insert(0 if prepend else len(arglist), item)\n\n # Set\n for key, item in args.set:\n primitive_parameters = [p for p in context.list_parameters()\n if context.describe_parameter(p)['parameter_type'] == 'primitive']\n if key not in primitive_parameters:\n raise CondaValueError(\"Key '%s' is not a known primitive parameter.\" % key)\n value = context.typify_parameter(key, item)\n rc_config[key] = value\n\n # Remove\n for key, item in args.remove:\n if key not in rc_config:\n if key != 'channels':\n raise CondaKeyError(key, \"key %r is not in the config file\" % key)\n rc_config[key] = ['defaults']\n if item not in rc_config[key]:\n raise CondaKeyError(key, \"%r is not in the %r key of the config file\" %\n (item, key))\n rc_config[key] = [i for i in rc_config[key] if i != item]\n\n # Remove Key\n for key, in args.remove_key:\n if key not in rc_config:\n raise CondaKeyError(key, \"key %r is not in the config file\" %\n key)\n del rc_config[key]\n\n # config.rc_keys\n if not args.get:\n with open(rc_path, 'w') as rc:\n rc.write(yaml_dump(rc_config))\n\n if context.json:\n stdout_json_success(\n rc_path=rc_path,\n warnings=json_warnings,\n get=json_get\n )\n return\n",
"path": "conda/cli/main_config.py"
}
] | [
{
"content": "# (c) 2012-2013 Continuum Analytics, Inc. / http://continuum.io\n# All Rights Reserved\n#\n# conda is distributed under the terms of the BSD 3-clause license.\n# Consult LICENSE.txt or http://opensource.org/licenses/BSD-3-Clause.\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nfrom argparse import SUPPRESS\nimport collections\nimport json\nimport os\nfrom os.path import join\nimport sys\nfrom textwrap import wrap\n\nfrom .common import Completer, add_parser_json, stdout_json_success\nfrom .. import CondaError\nfrom .._vendor.auxlib.compat import isiterable\nfrom .._vendor.auxlib.entity import EntityEncoder\nfrom ..base.constants import CONDA_HOMEPAGE_URL\nfrom ..base.context import context\nfrom ..common.compat import iteritems, string_types, text_type\nfrom ..common.configuration import pretty_list, pretty_map\nfrom ..common.constants import NULL\nfrom ..common.yaml import yaml_dump, yaml_load\nfrom ..config import (rc_bool_keys, rc_list_keys, rc_other, rc_string_keys, sys_rc_path,\n user_rc_path)\nfrom ..exceptions import CondaKeyError, CondaValueError, CouldntParseError\n\ndescr = \"\"\"\nModify configuration values in .condarc. This is modeled after the git\nconfig command. Writes to the user .condarc file (%s) by default.\n\n\"\"\" % user_rc_path\n\n# Note, the extra whitespace in the list keys is on purpose. It's so the\n# formatting from help2man is still valid YAML (otherwise it line wraps the\n# keys like \"- conda - defaults\"). Technically the parser here still won't\n# recognize it because it removes the indentation, but at least it will be\n# valid.\nadditional_descr = \"\"\"\nSee `conda config --describe` or %s/docs/config.html\nfor details on all the options that can go in .condarc.\n\nExamples:\n\nDisplay all configuration values as calculated and compiled:\n\n conda config --show\n\nDisplay all identified configuration sources:\n\n conda config --show-sources\n\nDescribe all available configuration options:\n\n conda config --describe\n\nAdd the conda-canary channel:\n\n conda config --add channels conda-canary\n\nSet the output verbosity to level 3 (highest):\n\n conda config --set verbosity 3\n\"\"\" % CONDA_HOMEPAGE_URL\n\n\nclass SingleValueKey(Completer):\n def _get_items(self):\n return rc_bool_keys + \\\n rc_string_keys + \\\n ['yes', 'no', 'on', 'off', 'true', 'false']\n\n\nclass ListKey(Completer):\n def _get_items(self):\n return rc_list_keys\n\n\nclass BoolOrListKey(Completer):\n def __contains__(self, other):\n return other in self.get_items()\n\n def _get_items(self):\n return rc_list_keys + rc_bool_keys\n\n\ndef configure_parser(sub_parsers):\n p = sub_parsers.add_parser(\n 'config',\n description=descr,\n help=descr,\n epilog=additional_descr,\n )\n add_parser_json(p)\n\n # TODO: use argparse.FileType\n location = p.add_mutually_exclusive_group()\n location.add_argument(\n \"--system\",\n action=\"store_true\",\n help=\"\"\"Write to the system .condarc file ({system}). Otherwise writes to the user\n config file ({user}).\"\"\".format(system=sys_rc_path,\n user=user_rc_path),\n )\n location.add_argument(\n \"--env\",\n action=\"store_true\",\n help=\"Write to the active conda environment .condarc file (%s). \"\n \"If no environment is active, write to the user config file (%s).\"\n \"\" % (os.getenv('CONDA_PREFIX', \"<no active environment>\"), user_rc_path),\n )\n location.add_argument(\n \"--file\",\n action=\"store\",\n help=\"\"\"Write to the given file. 
Otherwise writes to the user config file ({user})\nor the file path given by the 'CONDARC' environment variable, if it is set\n(default: %(default)s).\"\"\".format(user=user_rc_path),\n default=os.environ.get('CONDARC', user_rc_path)\n )\n\n # XXX: Does this really have to be mutually exclusive. I think the below\n # code will work even if it is a regular group (although combination of\n # --add and --remove with the same keys will not be well-defined).\n action = p.add_mutually_exclusive_group(required=True)\n action.add_argument(\n \"--show\",\n action=\"store_true\",\n help=\"Display all configuration values as calculated and compiled.\",\n )\n action.add_argument(\n \"--show-sources\",\n action=\"store_true\",\n help=\"Display all identified configuration sources.\",\n )\n action.add_argument(\n \"--validate\",\n action=\"store_true\",\n help=\"Validate all configuration sources.\",\n )\n action.add_argument(\n \"--describe\",\n action=\"store_true\",\n help=\"Describe available configuration parameters.\",\n )\n action.add_argument(\n \"--get\",\n nargs='*',\n action=\"store\",\n help=\"Get a configuration value.\",\n default=None,\n metavar='KEY',\n choices=BoolOrListKey()\n )\n action.add_argument(\n \"--append\",\n nargs=2,\n action=\"append\",\n help=\"\"\"Add one configuration value to the end of a list key.\"\"\",\n default=[],\n choices=ListKey(),\n metavar=('KEY', 'VALUE'),\n )\n action.add_argument(\n \"--prepend\", \"--add\",\n nargs=2,\n action=\"append\",\n help=\"\"\"Add one configuration value to the beginning of a list key.\"\"\",\n default=[],\n choices=ListKey(),\n metavar=('KEY', 'VALUE'),\n )\n action.add_argument(\n \"--set\",\n nargs=2,\n action=\"append\",\n help=\"\"\"Set a boolean or string key\"\"\",\n default=[],\n choices=SingleValueKey(),\n metavar=('KEY', 'VALUE'),\n )\n action.add_argument(\n \"--remove\",\n nargs=2,\n action=\"append\",\n help=\"\"\"Remove a configuration value from a list key. This removes\n all instances of the value.\"\"\",\n default=[],\n metavar=('KEY', 'VALUE'),\n )\n action.add_argument(\n \"--remove-key\",\n nargs=1,\n action=\"append\",\n help=\"\"\"Remove a configuration key (and all its values).\"\"\",\n default=[],\n metavar=\"KEY\",\n )\n\n p.add_argument(\n \"-f\", \"--force\",\n action=\"store_true\",\n default=NULL,\n help=SUPPRESS, # TODO: No longer used. 
Remove in a future release.\n )\n\n p.set_defaults(func=execute)\n\n\ndef execute(args, parser):\n try:\n execute_config(args, parser)\n except (CouldntParseError, NotImplementedError) as e:\n raise CondaError(e)\n\n\ndef format_dict(d):\n lines = []\n for k, v in iteritems(d):\n if isinstance(v, collections.Mapping):\n if v:\n lines.append(\"%s:\" % k)\n lines.append(pretty_map(v))\n else:\n lines.append(\"%s: {}\" % k)\n elif isiterable(v):\n if v:\n lines.append(\"%s:\" % k)\n lines.append(pretty_list(v))\n else:\n lines.append(\"%s: []\" % k)\n else:\n lines.append(\"%s: %s\" % (k, v if v is not None else \"None\"))\n return lines\n\n\ndef execute_config(args, parser):\n json_warnings = []\n json_get = {}\n\n if args.show_sources:\n if context.json:\n print(json.dumps(context.collect_all(), sort_keys=True,\n indent=2, separators=(',', ': ')))\n else:\n lines = []\n for source, reprs in iteritems(context.collect_all()):\n lines.append(\"==> %s <==\" % source)\n lines.extend(format_dict(reprs))\n lines.append('')\n print('\\n'.join(lines))\n return\n\n if args.show:\n from collections import OrderedDict\n\n d = OrderedDict((key, getattr(context, key))\n for key in context.list_parameters())\n if context.json:\n print(json.dumps(d, sort_keys=True, indent=2, separators=(',', ': '),\n cls=EntityEncoder))\n else:\n # coerce channels\n d['custom_channels'] = {k: text_type(v).replace(k, '') # TODO: the replace here isn't quite right # NOQA\n for k, v in iteritems(d['custom_channels'])}\n # TODO: custom_multichannels needs better formatting\n d['custom_multichannels'] = {k: json.dumps([text_type(c) for c in chnls])\n for k, chnls in iteritems(d['custom_multichannels'])}\n\n print('\\n'.join(format_dict(d)))\n context.validate_configuration()\n return\n\n if args.describe:\n paramater_names = context.list_parameters()\n if context.json:\n print(json.dumps([context.describe_parameter(name) for name in paramater_names],\n sort_keys=True, indent=2, separators=(',', ': '),\n cls=EntityEncoder))\n else:\n def clean_element_type(element_types):\n _types = set()\n for et in element_types:\n _types.add('str') if isinstance(et, string_types) else _types.add('%s' % et)\n return tuple(sorted(_types))\n\n for name in paramater_names:\n details = context.describe_parameter(name)\n aliases = details['aliases']\n string_delimiter = details.get('string_delimiter')\n element_types = details['element_types']\n if details['parameter_type'] == 'primitive':\n print(\"%s (%s)\" % (name, ', '.join(clean_element_type(element_types))))\n else:\n print(\"%s (%s: %s)\" % (name, details['parameter_type'],\n ', '.join(clean_element_type(element_types))))\n def_str = ' default: %s' % json.dumps(details['default_value'], indent=2,\n separators=(',', ': '),\n cls=EntityEncoder)\n print('\\n '.join(def_str.split('\\n')))\n if aliases:\n print(\" aliases: %s\" % ', '.join(aliases))\n if string_delimiter:\n print(\" string delimiter: '%s'\" % string_delimiter)\n print('\\n '.join(wrap(' ' + details['description'], 70)))\n print()\n return\n\n if args.validate:\n context.validate_all()\n return\n\n if args.system:\n rc_path = sys_rc_path\n elif args.env:\n if 'CONDA_PREFIX' in os.environ:\n rc_path = join(os.environ['CONDA_PREFIX'], '.condarc')\n else:\n rc_path = user_rc_path\n elif args.file:\n rc_path = args.file\n else:\n rc_path = user_rc_path\n\n # read existing condarc\n if os.path.exists(rc_path):\n with open(rc_path, 'r') as fh:\n rc_config = yaml_load(fh) or {}\n else:\n rc_config = {}\n\n # Get\n if args.get is not 
None:\n context.validate_all()\n if args.get == []:\n args.get = sorted(rc_config.keys())\n for key in args.get:\n if key not in rc_list_keys + rc_bool_keys + rc_string_keys:\n if key not in rc_other:\n message = \"unknown key %s\" % key\n if not context.json:\n print(message, file=sys.stderr)\n else:\n json_warnings.append(message)\n continue\n if key not in rc_config:\n continue\n\n if context.json:\n json_get[key] = rc_config[key]\n continue\n\n if isinstance(rc_config[key], (bool, string_types)):\n print(\"--set\", key, rc_config[key])\n else: # assume the key is a list-type\n # Note, since conda config --add prepends, these are printed in\n # the reverse order so that entering them in this order will\n # recreate the same file\n items = rc_config.get(key, [])\n numitems = len(items)\n for q, item in enumerate(reversed(items)):\n # Use repr so that it can be pasted back in to conda config --add\n if key == \"channels\" and q in (0, numitems-1):\n print(\"--add\", key, repr(item),\n \" # lowest priority\" if q == 0 else \" # highest priority\")\n else:\n print(\"--add\", key, repr(item))\n\n # prepend, append, add\n for arg, prepend in zip((args.prepend, args.append), (True, False)):\n sequence_parameters = [p for p in context.list_parameters()\n if context.describe_parameter(p)['parameter_type'] == 'sequence']\n for key, item in arg:\n if key == 'channels' and key not in rc_config:\n rc_config[key] = ['defaults']\n if key not in sequence_parameters:\n raise CondaValueError(\"Key '%s' is not a known sequence parameter.\" % key)\n if not isinstance(rc_config.get(key, []), list):\n bad = rc_config[key].__class__.__name__\n raise CouldntParseError(\"key %r should be a list, not %s.\" % (key, bad))\n if key == 'default_channels' and rc_path != sys_rc_path:\n msg = \"'default_channels' is only configurable for system installs\"\n raise NotImplementedError(msg)\n arglist = rc_config.setdefault(key, [])\n if item in arglist:\n # Right now, all list keys should not contain duplicates\n message = \"Warning: '%s' already in '%s' list, moving to the %s\" % (\n item, key, \"top\" if prepend else \"bottom\")\n arglist = rc_config[key] = [p for p in arglist if p != item]\n if not context.json:\n print(message, file=sys.stderr)\n else:\n json_warnings.append(message)\n arglist.insert(0 if prepend else len(arglist), item)\n\n # Set\n for key, item in args.set:\n primitive_parameters = [p for p in context.list_parameters()\n if context.describe_parameter(p)['parameter_type'] == 'primitive']\n if key not in primitive_parameters:\n raise CondaValueError(\"Key '%s' is not a known primitive parameter.\" % key)\n value = context.typify_parameter(key, item)\n rc_config[key] = value\n\n # Remove\n for key, item in args.remove:\n if key not in rc_config:\n if key != 'channels':\n raise CondaKeyError(key, \"key %r is not in the config file\" % key)\n rc_config[key] = ['defaults']\n if item not in rc_config[key]:\n raise CondaKeyError(key, \"%r is not in the %r key of the config file\" %\n (item, key))\n rc_config[key] = [i for i in rc_config[key] if i != item]\n\n # Remove Key\n for key, in args.remove_key:\n if key not in rc_config:\n raise CondaKeyError(key, \"key %r is not in the config file\" %\n key)\n del rc_config[key]\n\n # config.rc_keys\n if not args.get:\n try:\n with open(rc_path, 'w') as rc:\n rc.write(yaml_dump(rc_config))\n except (IOError, OSError) as e:\n raise CondaError('Cannot write to condarc file at %s\\n'\n 'Caused by %r' % (rc_path, e))\n\n if context.json:\n stdout_json_success(\n 
rc_path=rc_path,\n warnings=json_warnings,\n get=json_get\n )\n return\n",
"path": "conda/cli/main_config.py"
}
] | diff --git a/conda/cli/main_config.py b/conda/cli/main_config.py
index 1d44ce1468b..33b44afecc9 100644
--- a/conda/cli/main_config.py
+++ b/conda/cli/main_config.py
@@ -423,8 +423,12 @@ def clean_element_type(element_types):
# config.rc_keys
if not args.get:
- with open(rc_path, 'w') as rc:
- rc.write(yaml_dump(rc_config))
+ try:
+ with open(rc_path, 'w') as rc:
+ rc.write(yaml_dump(rc_config))
+ except (IOError, OSError) as e:
+ raise CondaError('Cannot write to condarc file at %s\n'
+ 'Caused by %r' % (rc_path, e))
if context.json:
stdout_json_success(
| conda config stack trace when can't write config file
This situation should be handled more gracefully. `conda config` doesn't have permission to write the config file.
Thanks.
```
An unexpected error has occurred, please consider sending the
following traceback to the conda GitHub issue tracker at:
https://github.com/conda/conda/issues
Include the output of the command 'conda info' in your report.
Traceback (most recent call last):
File "/opt/anaconda/bin/conda", line 5, in <module>
sys.exit(main())
File "/opt/anaconda/lib/python2.7/site-packages/conda/cli/main.py", line 179, in main
args.func(args, p)
File "/opt/anaconda/lib/python2.7/site-packages/conda/cli/main_config.py", line 339, in execute
with open(rc_path, 'w') as rc:
IOError: [Errno 13] Permission denied: '/opt/anaconda/.condarc'
```
|
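The patch above wraps the `.condarc` write in a try/except so a permission failure surfaces as a `CondaError` that names the path, instead of the "unexpected error" traceback quoted in the issue. Below is a minimal standalone sketch of that pattern; `ConfigWriteError` and `save_config` are illustrative stand-ins, not conda's API.

```python
# Sketch of the error-handling pattern the patch applies: catch the
# low-level write failure and re-raise one readable application error.
import yaml


class ConfigWriteError(Exception):
    """Illustrative stand-in for conda's CondaError."""


def save_config(rc_path, rc_config):
    try:
        with open(rc_path, 'w') as rc:
            rc.write(yaml.safe_dump(rc_config, default_flow_style=False))
    except (IOError, OSError) as e:  # IOError aliases OSError on Python 3
        # e.g. EACCES when rc_path lives in a root-owned install prefix
        raise ConfigWriteError('Cannot write to condarc file at %s\n'
                               'Caused by %r' % (rc_path, e))
```

Re-raising a single application-level error is what lets the CLI report the unwritable path and the underlying errno rather than asking the user to file a bug.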
evennia__evennia-3042 | [
{
"content": "\"\"\"\nXYZGrid - Griatch 2021\n\n\"\"\"\n\nfrom . import commands # noqa\nfrom . import example # noqa\nfrom . import launchcmd # noqa\nfrom . import prototypes # noqa\nfrom . import tests # noqa\nfrom . import utils # noqa\nfrom . import xymap # noqa\nfrom . import xymap_legend # noqa\nfrom . import xyzgrid # noqa\nfrom . import xyzroom # noqa\n",
"path": "evennia/contrib/grid/xyzgrid/__init__.py"
}
] | [
{
"content": "\"\"\"\nXYZGrid - Griatch 2021\n\n\"\"\"\nfrom . import (\n example,\n launchcmd,\n prototypes,\n tests,\n utils,\n xymap,\n xymap_legend,\n xyzgrid,\n xyzroom,\n commands,\n)\n",
"path": "evennia/contrib/grid/xyzgrid/__init__.py"
}
] | diff --git a/evennia/contrib/grid/xyzgrid/__init__.py b/evennia/contrib/grid/xyzgrid/__init__.py
index a33e47b6032..8fdc8700099 100644
--- a/evennia/contrib/grid/xyzgrid/__init__.py
+++ b/evennia/contrib/grid/xyzgrid/__init__.py
@@ -2,14 +2,15 @@
XYZGrid - Griatch 2021
"""
-
-from . import commands # noqa
-from . import example # noqa
-from . import launchcmd # noqa
-from . import prototypes # noqa
-from . import tests # noqa
-from . import utils # noqa
-from . import xymap # noqa
-from . import xymap_legend # noqa
-from . import xyzgrid # noqa
-from . import xyzroom # noqa
+from . import (
+ example,
+ launchcmd,
+ prototypes,
+ tests,
+ utils,
+ xymap,
+ xymap_legend,
+ xyzgrid,
+ xyzroom,
+ commands,
+)
| [BUG] "evennia xyzgrid help" causes TypeError: NoneType takes no arguments
#### Describe the bug
Fresh migration from git master to main and then installing xyzgrid prevents the evennia xyzgrid commands from working. For example, "evennia xyzgrid help" causes TypeError: NoneType takes no arguments.
#### To Reproduce
1. Migrated from git master branch to main branch for 1.x release of Evennia.
2. Installed [extra] (used to be in requirements_extra).
At this point, I can run the server and log in.
3. Added the xyzgrid command set and restarted.
'path', 'goto', 'map' are seen in the command list. The Limbo room does not have a map. Everything appears to work fine.
4. Modify the server/conf/settings.py.
xyzgrid is now available.
When I use xyzgrid, such as 'evennia xyzgrid help', or any other xyzgrid command:
TypeError: NoneType takes no arguments
#### Expected behavior
'evennia xyzgrid <command>' should call the xyzgrid command.
#### Environment, Evennia version, OS etc
Evennia 1.0.1 (rev 38011cc48d)
OS: nt
Python: 3.11.1
Twisted: 22.10.0
Django: 4.1.4
#### Additional context
This is based off helix4's message in #general on discord. I added my current steps that seem to reproduce the same issue down below. Here is the original message from helix4, with steps for reproducing on the older version of the code.
I am trying to test XYZGrid on a brand new install but failing. 1. cloned the single branch of evennia-develop, and initiated an evennia game. 2. installed requirements_extra, and migrated. I can run the server and log in. 3. i added the command set and reloadead, i see path, goto, map ingame. the Limbo room does not have a map. seems to work well. 4. modify the server/conf/settings.py, xyzgrid is now available.
When I use xyzgrid, such as evennia xyzgrid help, or any other xyzgrid command:
from evennia.utils.eveditor import EvEditor
File "/home/ubuntu/3ProjectMUD/evennia/evennia/utils/eveditor.py", line 201, in <module>
class SaveYesNoCmdSet(CmdSet):
TypeError: NoneType takes no arguments
Original message
https://discord.com/channels/246323978879107073/246323978879107073/937578545704730624
Griatch's response
https://discord.com/channels/246323978879107073/246323978879107073/937610453184561183
Steps:
1. Migrated from git master branch to main branch for 1.x release of Evennia.
2. Installed [extra] (used to be in requirements_extra).
At this point, I can run the server and log in.
3. Added the xyzgrid command set and restarted.
'path', 'goto', 'map' are seen in the command list. The Limbo room does not have a map. Everything appears to work fine.
4. Modify the server/conf/settings.py.
xyzgrid is now available.
When I use xyzgrid, such as 'evennia xyzgrid help', or any other xyzgrid command:
Traceback (most recent call last):
File "C:\muddev\evenv\Scripts\evennia_launcher.py", line 18, in <module>
main()
File "C:\muddev\evennia\evennia\server\evennia_launcher.py", line 2422, in main
if run_custom_commands(option, *unknown_args):
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "C:\muddev\evennia\evennia\server\evennia_launcher.py", line 2023, in run_custom_commands
mod = importlib.import_module(modpath)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
...
File "<frozen importlib._bootstrap>", line 1206, in _gcd_import
File "<frozen importlib._bootstrap>", line 1178, in _find_and_load
File "<frozen importlib._bootstrap>", line 1128, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 241, in _call_with_frames_removed
File "<frozen importlib._bootstrap>", line 1206, in _gcd_import
File "<frozen importlib._bootstrap>", line 1178, in _find_and_load
File "<frozen importlib._bootstrap>", line 1149, in _find_and_load_unlocked
File "<frozen importlib._bootstrap>", line 690, in _load_unlocked
File "<frozen importlib._bootstrap_external>", line 940, in exec_module
File "<frozen importlib._bootstrap>", line 241, in _call_with_frames_removed
File "C:\muddev\evennia\evennia\contrib\grid\xyzgrid\__init__.py", line 6, in <module>
from . import commands # noqa
^^^^^^^^^^^^^^^^^^^^^^
File "C:\muddev\evennia\evennia\contrib\grid\xyzgrid\commands.py", line 15, in <module>
from evennia.commands.default import building
File "C:\muddev\evennia\evennia\commands\default\building.py", line 14, in <module>
from evennia.prototypes import menus as olc_menus
File "C:\muddev\evennia\evennia\prototypes\menus.py", line 20, in <module>
from evennia.utils.evmenu import EvMenu, list_node
File "C:\muddev\evennia\evennia\utils\evmenu.py", line 350, in <module>
class CmdEvMenuNode(Command):
TypeError: NoneType takes no arguments
|
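The fix above only reorders the package's eager submodule imports so that `commands`, whose import chain reaches the `Command`/`CmdSet` classes, is listed last; the launcher can then import the lighter submodules before anything touches a class that is still `None`. The cryptic message itself is easy to reproduce in isolation. The sketch below is not evennia code, just a minimal illustration of what happens when a subclass is defined while its base is still `None`.

```python
# Self-contained reproduction of the error message, not of evennia's
# import chain: when a base class is still None at class-definition time,
# Python uses type(None) as the metaclass and calls it with
# (name, bases, namespace), which it cannot accept.
Command = None  # stand-in for a base class that has not been initialised yet

try:
    class CmdEvMenuNode(Command):  # same shape as the failing line
        pass
except TypeError as exc:
    print(exc)  # "NoneType takes no arguments" (as observed on Python 3.11)
```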
mkdocs__mkdocs-3700 | [
{
"content": "from __future__ import annotations\n\nimport logging\nimport os\nimport warnings\nfrom typing import Any, Collection, MutableMapping\n\nimport jinja2\nimport yaml\n\ntry:\n from yaml import CSafeLoader as SafeLoader\nexcept ImportError: # pragma: no cover\n from yaml import SafeLoader # type: ignore\n\nfrom mkdocs import localization, utils\nfrom mkdocs.config.base import ValidationError\nfrom mkdocs.utils import templates\n\nlog = logging.getLogger(__name__)\n\n\nclass Theme(MutableMapping[str, Any]):\n \"\"\"\n A Theme object.\n\n Args:\n name: The name of the theme as defined by its entrypoint.\n custom_dir: User defined directory for custom templates.\n static_templates: A list of templates to render as static pages.\n\n All other keywords are passed as-is and made available as a key/value mapping.\n \"\"\"\n\n def __init__(\n self,\n name: str | None = None,\n *,\n custom_dir: str | None = None,\n static_templates: Collection[str] = (),\n locale: str | None = None,\n **user_config,\n ) -> None:\n self.name = name\n self._custom_dir = custom_dir\n _vars: dict[str, Any] = {'name': name, 'locale': 'en'}\n self.__vars = _vars\n\n # MkDocs provided static templates are always included\n package_dir = os.path.abspath(os.path.dirname(__file__))\n mkdocs_templates = os.path.join(package_dir, 'templates')\n self.static_templates = set(os.listdir(mkdocs_templates))\n\n # Build self.dirs from various sources in order of precedence\n self.dirs = []\n\n if custom_dir is not None:\n self.dirs.append(custom_dir)\n\n if name:\n self._load_theme_config(name)\n\n # Include templates provided directly by MkDocs (outside any theme)\n self.dirs.append(mkdocs_templates)\n\n # Handle remaining user configs. Override theme configs (if set)\n self.static_templates.update(static_templates)\n _vars.update(user_config)\n\n # Validate locale and convert to Locale object\n if locale is None:\n locale = _vars['locale']\n _vars['locale'] = localization.parse_locale(locale)\n\n name: str | None\n\n @property\n def locale(self) -> localization.Locale:\n return self['locale']\n\n @property\n def custom_dir(self) -> str | None:\n return self._custom_dir\n\n @property\n def _vars(self) -> dict[str, Any]:\n warnings.warn(\n \"Do not access Theme._vars, instead access the keys of Theme directly.\",\n DeprecationWarning,\n )\n return self.__vars\n\n dirs: list[str]\n\n static_templates: set[str]\n\n def __repr__(self) -> str:\n return \"{}(name={!r}, dirs={!r}, static_templates={!r}, {})\".format(\n self.__class__.__name__,\n self.name,\n self.dirs,\n self.static_templates,\n ', '.join(f'{k}={v!r}' for k, v in self.items()),\n )\n\n def __getitem__(self, key: str) -> Any:\n return self.__vars[key]\n\n def __setitem__(self, key: str, value):\n self.__vars[key] = value\n\n def __delitem__(self, key: str):\n del self.__vars[key]\n\n def __contains__(self, item: object) -> bool:\n return item in self.__vars\n\n def __len__(self):\n return len(self.__vars)\n\n def __iter__(self):\n return iter(self.__vars)\n\n def _load_theme_config(self, name: str) -> None:\n \"\"\"Recursively load theme and any parent themes.\"\"\"\n theme_dir = utils.get_theme_dir(name)\n utils.get_themes.cache_clear()\n self.dirs.append(theme_dir)\n\n try:\n file_path = os.path.join(theme_dir, 'mkdocs_theme.yml')\n with open(file_path, 'rb') as f:\n theme_config = yaml.load(f, SafeLoader)\n except OSError as e:\n log.debug(e)\n raise ValidationError(\n f\"The theme '{name}' does not appear to have a configuration file. 
\"\n f\"Please upgrade to a current version of the theme.\"\n )\n\n log.debug(f\"Loaded theme configuration for '{name}' from '{file_path}': {theme_config}\")\n\n if parent_theme := theme_config.pop('extends', None):\n themes = utils.get_theme_names()\n if parent_theme not in themes:\n raise ValidationError(\n f\"The theme '{name}' inherits from '{parent_theme}', which does not appear to be installed. \"\n f\"The available installed themes are: {', '.join(themes)}\"\n )\n self._load_theme_config(parent_theme)\n\n self.static_templates.update(theme_config.pop('static_templates', []))\n self.__vars.update(theme_config)\n\n def get_env(self) -> jinja2.Environment:\n \"\"\"Return a Jinja environment for the theme.\"\"\"\n loader = jinja2.FileSystemLoader(self.dirs)\n # No autoreload because editing a template in the middle of a build is not useful.\n env = jinja2.Environment(loader=loader, auto_reload=False)\n env.filters['url'] = templates.url_filter\n env.filters['script_tag'] = templates.script_tag_filter\n localization.install_translations(env, self.locale, self.dirs)\n return env\n",
"path": "mkdocs/theme.py"
}
] | [
{
"content": "from __future__ import annotations\n\nimport logging\nimport os\nimport warnings\nfrom typing import Any, Collection, MutableMapping\n\nimport jinja2\nimport yaml\n\ntry:\n from yaml import CSafeLoader as SafeLoader\nexcept ImportError: # pragma: no cover\n from yaml import SafeLoader # type: ignore\n\nfrom mkdocs import localization, utils\nfrom mkdocs.config.base import ValidationError\nfrom mkdocs.utils import templates\n\nlog = logging.getLogger(__name__)\n\n\nclass Theme(MutableMapping[str, Any]):\n \"\"\"\n A Theme object.\n\n Args:\n name: The name of the theme as defined by its entrypoint.\n custom_dir: User defined directory for custom templates.\n static_templates: A list of templates to render as static pages.\n\n All other keywords are passed as-is and made available as a key/value mapping.\n \"\"\"\n\n def __init__(\n self,\n name: str | None = None,\n *,\n custom_dir: str | None = None,\n static_templates: Collection[str] = (),\n locale: str | None = None,\n **user_config,\n ) -> None:\n self.name = name\n self._custom_dir = custom_dir\n _vars: dict[str, Any] = {'name': name, 'locale': 'en'}\n self.__vars = _vars\n\n # MkDocs provided static templates are always included\n package_dir = os.path.abspath(os.path.dirname(__file__))\n mkdocs_templates = os.path.join(package_dir, 'templates')\n self.static_templates = set(os.listdir(mkdocs_templates))\n\n # Build self.dirs from various sources in order of precedence\n self.dirs = []\n\n if custom_dir is not None:\n self.dirs.append(custom_dir)\n\n if name:\n self._load_theme_config(name)\n\n # Include templates provided directly by MkDocs (outside any theme)\n self.dirs.append(mkdocs_templates)\n\n # Handle remaining user configs. Override theme configs (if set)\n self.static_templates.update(static_templates)\n _vars.update(user_config)\n\n # Validate locale and convert to Locale object\n if locale is None:\n locale = _vars['locale']\n _vars['locale'] = localization.parse_locale(locale)\n\n name: str | None\n\n @property\n def locale(self) -> localization.Locale:\n return self['locale']\n\n @property\n def custom_dir(self) -> str | None:\n return self._custom_dir\n\n @property\n def _vars(self) -> dict[str, Any]:\n warnings.warn(\n \"Do not access Theme._vars, instead access the keys of Theme directly.\",\n DeprecationWarning,\n )\n return self.__vars\n\n dirs: list[str]\n\n static_templates: set[str]\n\n def __repr__(self) -> str:\n return \"{}(name={!r}, dirs={!r}, static_templates={!r}, {})\".format(\n self.__class__.__name__,\n self.name,\n self.dirs,\n self.static_templates,\n ', '.join(f'{k}={v!r}' for k, v in self.items()),\n )\n\n def __getitem__(self, key: str) -> Any:\n return self.__vars[key]\n\n def __setitem__(self, key: str, value):\n self.__vars[key] = value\n\n def __delitem__(self, key: str):\n del self.__vars[key]\n\n def __contains__(self, item: object) -> bool:\n return item in self.__vars\n\n def __len__(self):\n return len(self.__vars)\n\n def __iter__(self):\n return iter(self.__vars)\n\n def _load_theme_config(self, name: str) -> None:\n \"\"\"Recursively load theme and any parent themes.\"\"\"\n theme_dir = utils.get_theme_dir(name)\n utils.get_themes.cache_clear()\n self.dirs.append(theme_dir)\n\n try:\n file_path = os.path.join(theme_dir, 'mkdocs_theme.yml')\n with open(file_path, 'rb') as f:\n theme_config = yaml.load(f, SafeLoader)\n except OSError as e:\n log.debug(e)\n raise ValidationError(\n f\"The theme '{name}' does not appear to have a configuration file. 
\"\n f\"Please upgrade to a current version of the theme.\"\n )\n\n if theme_config is None:\n theme_config = {}\n\n log.debug(f\"Loaded theme configuration for '{name}' from '{file_path}': {theme_config}\")\n\n if parent_theme := theme_config.pop('extends', None):\n themes = utils.get_theme_names()\n if parent_theme not in themes:\n raise ValidationError(\n f\"The theme '{name}' inherits from '{parent_theme}', which does not appear to be installed. \"\n f\"The available installed themes are: {', '.join(themes)}\"\n )\n self._load_theme_config(parent_theme)\n\n self.static_templates.update(theme_config.pop('static_templates', []))\n self.__vars.update(theme_config)\n\n def get_env(self) -> jinja2.Environment:\n \"\"\"Return a Jinja environment for the theme.\"\"\"\n loader = jinja2.FileSystemLoader(self.dirs)\n # No autoreload because editing a template in the middle of a build is not useful.\n env = jinja2.Environment(loader=loader, auto_reload=False)\n env.filters['url'] = templates.url_filter\n env.filters['script_tag'] = templates.script_tag_filter\n localization.install_translations(env, self.locale, self.dirs)\n return env\n",
"path": "mkdocs/theme.py"
}
] | diff --git a/mkdocs/tests/theme_tests.py b/mkdocs/tests/theme_tests.py
index f9989fb3f4..ddc526bfed 100644
--- a/mkdocs/tests/theme_tests.py
+++ b/mkdocs/tests/theme_tests.py
@@ -104,3 +104,22 @@ def test_inherited_theme(self):
],
)
self.assertEqual(theme.static_templates, {'sitemap.xml', 'child.html', 'parent.html'})
+
+ def test_empty_config_file(self):
+ # Test for themes with *empty* mkdocs_theme.yml.
+ # See https://github.com/mkdocs/mkdocs/issues/3699
+ m = mock.Mock(
+ # yaml.load returns "None" for an empty file
+ side_effect=[None]
+ )
+ with mock.patch('yaml.load', m) as m:
+ theme = Theme(name='mkdocs')
+ # Should only have the default name and locale __vars set in
+ # Theme.__init__()
+ self.assertEqual(
+ dict(theme),
+ {
+ 'name': 'mkdocs',
+ 'locale': parse_locale('en'),
+ },
+ )
diff --git a/mkdocs/theme.py b/mkdocs/theme.py
index bf5872015e..b45f107920 100644
--- a/mkdocs/theme.py
+++ b/mkdocs/theme.py
@@ -138,6 +138,9 @@ def _load_theme_config(self, name: str) -> None:
f"Please upgrade to a current version of the theme."
)
+ if theme_config is None:
+ theme_config = {}
+
log.debug(f"Loaded theme configuration for '{name}' from '{file_path}': {theme_config}")
if parent_theme := theme_config.pop('extends', None):
| Empty mkdocs_theme.yml breaks build
Hello! In the docs it is [stated](https://www.mkdocs.org/dev-guide/themes/#theme-configuration) that a theme **can** have an empty `mkdocs_theme.yml` file:
> However, if the theme offers no configuration options, the file is still required and can be left blank.
Unfortunately, this seems to have changed recently, and themes with empty `mkdocs_theme.yml` files now cause an exception when building:
```shell
> mkdocs build --verbose
DEBUG - Loading configuration file: ./mkdocs.yml
DEBUG - Loaded theme configuration for 'custom_theme' from
'./venv/lib/python3.12/site-packages/custom_theme/mkdocs_theme.yml':
None
Traceback (most recent call last):
[...]
File "./venv/lib/python3.12/site-packages/mkdocs/config/config_options.py", line 868, in run_validation
return theme.Theme(**theme_config)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "./venv/lib/python3.12/site-packages/mkdocs/theme.py", line 61, in __init__
self._load_theme_config(name)
File "./venv/lib/python3.12/site-packages/mkdocs/theme.py", line 143, in _load_theme_config
if parent_theme := theme_config.pop('extends', None):
^^^^^^^^^^^^^^^^
AttributeError: 'NoneType' object has no attribute 'pop'
```
|
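The patch above amounts to treating `None`, which is what `yaml.load` returns for an empty document, as an empty mapping before `.pop()` is called on it. A minimal standalone sketch of the same defensive step follows; the function name is illustrative, and `yaml.safe_load` stands in for MkDocs' CSafeLoader/SafeLoader selection.

```python
# Sketch of the defensive default the patch adds: an empty YAML file
# parses to None, so normalise to {} before using dict methods.
import yaml


def load_theme_config(file_path):
    with open(file_path, 'rb') as f:
        theme_config = yaml.safe_load(f)  # None for a blank mkdocs_theme.yml
    if theme_config is None:
        theme_config = {}
    parent_theme = theme_config.pop('extends', None)  # safe even when blank
    return theme_config, parent_theme
```

Defaulting to `{}` keeps a blank `mkdocs_theme.yml` equivalent to one that defines no options, which is the behaviour the documentation quoted in the issue promises.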
tensorflow__tensor2tensor-1557 | [
{
"content": "# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Train and evaluate.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport contextlib\nimport os\nimport sys\nfrom tensor2tensor import models # pylint: disable=unused-import\nfrom tensor2tensor import problems as problems_lib # pylint: disable=unused-import\nfrom tensor2tensor.data_generators import problem # pylint: disable=unused-import\n\nfrom tensor2tensor.utils import cloud_mlengine\nfrom tensor2tensor.utils import decoding\nfrom tensor2tensor.utils import flags as t2t_flags # pylint: disable=unused-import\nfrom tensor2tensor.utils import hparams_lib\nfrom tensor2tensor.utils import mlperf_log\nfrom tensor2tensor.utils import registry\nfrom tensor2tensor.utils import trainer_lib\nfrom tensor2tensor.utils import usr_dir\nimport tensorflow as tf\n\nfrom tensorflow.contrib.tpu.python.tpu import tpu_config\n\n\nflags = tf.flags\nFLAGS = flags.FLAGS\n\n# See utils/flags.py for additional command-line flags.\nflags.DEFINE_string(\"t2t_usr_dir\", None,\n \"Path to a Python module that will be imported. The \"\n \"__init__.py file should include the necessary imports. \"\n \"The imported files should contain registrations, \"\n \"e.g. @registry.register_model calls, that will then be \"\n \"available to the t2t-trainer.\")\nflags.DEFINE_integer(\"random_seed\", None, \"Random seed.\")\nflags.DEFINE_integer(\"tpu_num_shards\", 8, \"Number of tpu shards.\")\nflags.DEFINE_string(\"tpu_job_name\", None,\n \"TPU job name. TPUEstimator can auto-infer this but if the \"\n \"configuration is esoteric it should be provided here.\")\nflags.DEFINE_integer(\"iterations_per_loop\", 100,\n \"Number of iterations in a TPU training loop.\")\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU.\")\nflags.DEFINE_bool(\"use_tpu_estimator\", False, \"Whether to use TPUEstimator. \"\n \"This is always enabled when use_tpu is True.\")\nflags.DEFINE_bool(\"xla_compile\", False,\n \"Whether to use XLA to compile model_fn.\")\nflags.DEFINE_integer(\"xla_jit_level\", -1,\n \"GlobalJitLevel to use while compiling the full graph.\")\nflags.DEFINE_integer(\"tpu_infeed_sleep_secs\", None,\n \"How long to sleep the infeed thread.\")\nflags.DEFINE_bool(\"generate_data\", False, \"Generate data before training?\")\nflags.DEFINE_string(\"tmp_dir\", \"/tmp/t2t_datagen\",\n \"Temporary storage directory, used if --generate_data.\")\nflags.DEFINE_bool(\"profile\", False, \"Profile performance?\")\nflags.DEFINE_integer(\"inter_op_parallelism_threads\", 0,\n \"Number of inter_op_parallelism_threads to use for CPU. \"\n \"See TensorFlow config.proto for details.\")\nflags.DEFINE_integer(\"intra_op_parallelism_threads\", 0,\n \"Number of intra_op_parallelism_threads to use for CPU. 
\"\n \"See TensorFlow config.proto for details.\")\n# TODO(lukaszkaiser): resolve memory and variable assign issues and set to True.\nflags.DEFINE_bool(\n \"optionally_use_dist_strat\", False,\n \"Whether to use TensorFlow DistributionStrategy instead of explicitly \"\n \"replicating the model. DistributionStrategy is used only if the \"\n \"model replication configuration is supported by the DistributionStrategy.\")\n# To maintain compatibility with some internal libs, we guard against these flag\n# definitions possibly erroring. Apologies for the ugliness.\ntry:\n flags.DEFINE_string(\"master\", \"\", \"Address of TensorFlow master.\")\n flags.DEFINE_string(\"output_dir\", \"\", \"Base output directory for run.\")\n flags.DEFINE_string(\"schedule\", \"continuous_train_and_eval\",\n \"Method of Experiment to run.\")\n flags.DEFINE_integer(\"eval_steps\", 100,\n \"Number of steps in evaluation. By default, eval will \"\n \"stop after eval_steps or when it runs through the eval \"\n \"dataset once in full, whichever comes first, so this \"\n \"can be a very large number.\")\nexcept: # pylint: disable=bare-except\n pass\n\nflags.DEFINE_string(\"std_server_protocol\", \"grpc\",\n \"Protocol for tf.train.Server.\")\n\n# Google Cloud TPUs\nflags.DEFINE_string(\"cloud_tpu_name\", \"%s-tpu\" % os.getenv(\"USER\"),\n \"Name of Cloud TPU instance to use or create.\")\n\n# Google Cloud ML Engine\nflags.DEFINE_bool(\"cloud_mlengine\", False,\n \"Whether to launch on Cloud ML Engine.\")\nflags.DEFINE_string(\"cloud_mlengine_master_type\", None,\n \"Machine type for master on Cloud ML Engine. \"\n \"If provided, overrides default selections based on \"\n \"--worker_gpu. User is responsible for ensuring \"\n \"type is valid and that --worker_gpu matches number of \"\n \"GPUs on machine type. See documentation: \"\n \"https://cloud.google.com/ml-engine/reference/rest/v1/\"\n \"projects.jobs#traininginput\")\n# Hyperparameter tuning on Cloud ML Engine\n# Pass an --hparams_range to enable\nflags.DEFINE_string(\"autotune_objective\", None,\n \"TensorBoard metric name to optimize.\")\nflags.DEFINE_bool(\"autotune_maximize\", True,\n \"Whether to maximize (vs. minimize) autotune_objective.\")\nflags.DEFINE_integer(\"autotune_max_trials\", 10,\n \"Maximum number of tuning experiments to run.\")\nflags.DEFINE_integer(\"autotune_parallel_trials\", 1,\n \"How many trials to run in parallel (will spin up this \"\n \"many jobs.\")\n# Note than in open-source TensorFlow, the dash gets converted to an underscore,\n# so access is FLAGS.job_dir.\nflags.DEFINE_string(\"job-dir\", None,\n \"DO NOT USE. Exists only for Cloud ML Engine to pass in \"\n \"during hyperparameter tuning. Overrides --output_dir.\")\nflags.DEFINE_integer(\"log_step_count_steps\", 100,\n \"Number of local steps after which progress is printed \"\n \"out\")\n\n\n\ndef set_hparams_from_args(args):\n \"\"\"Set hparams overrides from unparsed args list.\"\"\"\n if not args:\n return\n\n hp_prefix = \"--hp_\"\n tf.logging.info(\"Found unparsed command-line arguments. 
Checking if any \"\n \"start with %s and interpreting those as hparams \"\n \"settings.\", hp_prefix)\n\n pairs = []\n i = 0\n while i < len(args):\n arg = args[i]\n if arg.startswith(hp_prefix):\n pairs.append((arg[len(hp_prefix):], args[i+1]))\n i += 2\n else:\n tf.logging.warn(\"Found unknown flag: %s\", arg)\n i += 1\n\n as_hparams = \",\".join([\"%s=%s\" % (key, val) for key, val in pairs])\n if FLAGS.hparams:\n as_hparams = \",\" + as_hparams\n FLAGS.hparams += as_hparams\n\n\ndef create_hparams():\n \"\"\"Create hparams.\"\"\"\n if FLAGS.use_tpu and \"tpu\" not in FLAGS.hparams_set:\n tf.logging.warn(\"Not all hyperparameter sets work on TPU. \"\n \"Prefer hparams_sets with a '_tpu' suffix, \"\n \"e.g. transformer_tpu, if available for your model.\")\n hparams_path = os.path.join(FLAGS.output_dir, \"hparams.json\")\n return trainer_lib.create_hparams(FLAGS.hparams_set, FLAGS.hparams,\n hparams_path=hparams_path)\n\n\ndef create_experiment_fn():\n return trainer_lib.create_experiment_fn(\n model_name=FLAGS.model,\n problem_name=FLAGS.problem,\n data_dir=os.path.expanduser(FLAGS.data_dir),\n train_steps=FLAGS.train_steps,\n eval_steps=FLAGS.eval_steps,\n min_eval_frequency=FLAGS.local_eval_frequency,\n schedule=FLAGS.schedule,\n eval_throttle_seconds=FLAGS.eval_throttle_seconds,\n export=FLAGS.export_saved_model,\n decode_hparams=decoding.decode_hparams(FLAGS.decode_hparams),\n use_tfdbg=FLAGS.tfdbg,\n use_dbgprofile=FLAGS.dbgprofile,\n eval_early_stopping_steps=FLAGS.eval_early_stopping_steps,\n eval_early_stopping_metric=FLAGS.eval_early_stopping_metric,\n eval_early_stopping_metric_delta=FLAGS.eval_early_stopping_metric_delta,\n eval_early_stopping_metric_minimize=FLAGS\n .eval_early_stopping_metric_minimize,\n eval_timeout_mins=FLAGS.eval_timeout_mins,\n eval_use_test_set=FLAGS.eval_use_test_set,\n use_tpu=FLAGS.use_tpu,\n use_tpu_estimator=FLAGS.use_tpu_estimator,\n use_xla=FLAGS.xla_compile,\n warm_start_from=FLAGS.warm_start_from,\n decode_from_file=FLAGS.decode_from_file,\n decode_to_file=FLAGS.decode_to_file,\n decode_reference=FLAGS.decode_reference,\n std_server_protocol=FLAGS.std_server_protocol)\n\n\ndef create_run_config(hp, output_dir=None):\n \"\"\"Create a run config.\n\n Args:\n hp: model hyperparameters\n output_dir: model's output directory, defaults to output_dir flag.\n\n Returns:\n a run config\n \"\"\"\n save_ckpt_steps = max(FLAGS.iterations_per_loop, FLAGS.local_eval_frequency)\n save_ckpt_secs = FLAGS.save_checkpoints_secs or None\n if save_ckpt_secs:\n save_ckpt_steps = None\n assert FLAGS.output_dir or FLAGS.checkpoint_path\n tpu_config_extra_kwargs = {}\n if FLAGS.tpu_job_name is not None:\n tpu_config_extra_kwargs[\"tpu_job_name\"] = FLAGS.tpu_job_name\n\n if getattr(hp, \"mtf_mode\", False):\n save_ckpt_steps = None # Disable the default saver\n save_ckpt_secs = None # Disable the default saver\n tpu_config_extra_kwargs = {\n \"num_cores_per_replica\": 1,\n \"per_host_input_for_training\": tpu_config.InputPipelineConfig.BROADCAST,\n }\n\n # the various custom getters we have written do not play well together yet.\n # TODO(noam): ask rsepassi for help here.\n daisy_chain_variables = (\n hp.daisy_chain_variables and\n hp.activation_dtype == \"float32\" and\n hp.weight_dtype == \"float32\")\n return trainer_lib.create_run_config(\n model_name=FLAGS.model,\n model_dir=output_dir or os.path.expanduser(FLAGS.output_dir),\n master=FLAGS.master,\n iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.tpu_num_shards,\n 
log_device_placement=FLAGS.log_device_placement,\n save_checkpoints_steps=save_ckpt_steps,\n save_checkpoints_secs=save_ckpt_secs,\n keep_checkpoint_max=FLAGS.keep_checkpoint_max,\n keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours,\n num_gpus=FLAGS.worker_gpu,\n gpu_order=FLAGS.gpu_order,\n num_async_replicas=FLAGS.worker_replicas,\n gpu_mem_fraction=FLAGS.worker_gpu_memory_fraction,\n enable_graph_rewriter=FLAGS.enable_graph_rewriter,\n use_tpu=FLAGS.use_tpu,\n use_tpu_estimator=FLAGS.use_tpu_estimator,\n xla_jit_level=FLAGS.xla_jit_level,\n schedule=FLAGS.schedule,\n no_data_parallelism=hp.no_data_parallelism,\n optionally_use_dist_strat=FLAGS.optionally_use_dist_strat,\n daisy_chain_variables=daisy_chain_variables,\n ps_replicas=FLAGS.ps_replicas,\n ps_job=FLAGS.ps_job,\n ps_gpu=FLAGS.ps_gpu,\n sync=FLAGS.sync,\n worker_id=FLAGS.worker_id,\n worker_job=FLAGS.worker_job,\n random_seed=FLAGS.random_seed,\n tpu_infeed_sleep_secs=FLAGS.tpu_infeed_sleep_secs,\n inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads,\n log_step_count_steps=FLAGS.log_step_count_steps,\n intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads,\n tpu_config_extra_kwargs=tpu_config_extra_kwargs,\n cloud_tpu_name=FLAGS.cloud_tpu_name)\n\n\ndef generate_data():\n # Generate data if requested.\n data_dir = os.path.expanduser(FLAGS.data_dir)\n tmp_dir = os.path.expanduser(FLAGS.tmp_dir)\n tf.gfile.MakeDirs(data_dir)\n tf.gfile.MakeDirs(tmp_dir)\n\n problem_name = FLAGS.problem\n tf.logging.info(\"Generating data for %s\" % problem_name)\n registry.problem(problem_name).generate_data(data_dir, tmp_dir)\n\n\[email protected]\ndef profile_context():\n if FLAGS.profile:\n with tf.contrib.tfprof.ProfileContext(\n \"t2tprof\", trace_steps=range(100), dump_steps=range(100)) as pctx:\n opts = tf.profiler.ProfileOptionBuilder.time_and_memory()\n pctx.add_auto_profiling(\"op\", opts, range(100))\n yield\n else:\n yield\n\n\ndef maybe_log_registry_and_exit():\n if FLAGS.registry_help:\n tf.logging.info(registry.help_string())\n sys.exit(0)\n\n\ndef is_chief():\n schedules = [\"train\", \"train_and_evaluate\", \"continuous_train_and_eval\"]\n return FLAGS.worker_id == 0 and FLAGS.schedule in schedules\n\n\ndef save_metadata(hparams):\n \"\"\"Saves FLAGS and hparams to output_dir.\"\"\"\n output_dir = os.path.expanduser(FLAGS.output_dir)\n if not tf.gfile.Exists(output_dir):\n tf.gfile.MakeDirs(output_dir)\n\n # Save FLAGS in txt file\n if hasattr(FLAGS, \"flags_into_string\"):\n flags_str = FLAGS.flags_into_string()\n t2t_flags_str = \"\\n\".join([\n \"--%s=%s\" % (f.name, f.value)\n for f in FLAGS.flags_by_module_dict()[\"tensor2tensor.utils.flags\"]\n ])\n else:\n flags_dict = FLAGS.__dict__[\"__flags\"]\n flags_str = \"\\n\".join(\n [\"--%s=%s\" % (name, str(f)) for (name, f) in flags_dict.items()])\n t2t_flags_str = None\n\n flags_txt = os.path.join(output_dir, \"flags.txt\")\n with tf.gfile.Open(flags_txt, \"w\") as f:\n f.write(flags_str)\n\n if t2t_flags_str:\n t2t_flags_txt = os.path.join(output_dir, \"flags_t2t.txt\")\n with tf.gfile.Open(t2t_flags_txt, \"w\") as f:\n f.write(t2t_flags_str)\n\n # Save hparams as hparams.json\n new_hparams = hparams_lib.copy_hparams(hparams)\n # Modality class is not JSON serializable so remove.\n new_hparams.del_hparam(\"modality\")\n\n hparams_fname = os.path.join(output_dir, \"hparams.json\")\n with tf.gfile.Open(hparams_fname, \"w\") as f:\n f.write(new_hparams.to_json(indent=0, sort_keys=True))\n\n\ndef execute_schedule(exp):\n if not hasattr(exp, 
FLAGS.schedule):\n raise ValueError(\n \"Experiment has no method %s, from --schedule\" % FLAGS.schedule)\n with profile_context():\n getattr(exp, FLAGS.schedule)()\n\n\ndef run_std_server():\n exp = trainer_lib.T2TExperiment(*([None] * 5))\n exp.run_std_server()\n\n\ndef main(argv):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)\n\n # If we just have to print the registry, do that and exit early.\n maybe_log_registry_and_exit()\n\n # Create HParams.\n if argv:\n set_hparams_from_args(argv[1:])\n hparams = create_hparams()\n\n if FLAGS.schedule == \"train\" or FLAGS.schedule == \"train_eval_and_decode\":\n mlperf_log.transformer_print(key=mlperf_log.RUN_START, hparams=hparams)\n if FLAGS.schedule == \"run_std_server\":\n run_std_server()\n mlperf_log.transformer_print(\n key=mlperf_log.RUN_SET_RANDOM_SEED, value=FLAGS.random_seed,\n hparams=hparams)\n trainer_lib.set_random_seed(FLAGS.random_seed)\n\n if FLAGS.cloud_mlengine:\n cloud_mlengine.launch()\n return\n\n if FLAGS.generate_data:\n generate_data()\n\n if cloud_mlengine.job_dir():\n FLAGS.output_dir = cloud_mlengine.job_dir()\n\n exp_fn = create_experiment_fn()\n exp = exp_fn(create_run_config(hparams), hparams)\n if is_chief():\n save_metadata(hparams)\n execute_schedule(exp)\n if FLAGS.schedule != \"train\":\n mlperf_log.transformer_print(key=mlperf_log.RUN_FINAL,\n hparams=hparams)\n\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run()\n",
"path": "tensor2tensor/bin/t2t_trainer.py"
}
] | [
{
"content": "# coding=utf-8\n# Copyright 2019 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Train and evaluate.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport contextlib\nimport os\nimport sys\nfrom tensor2tensor import models # pylint: disable=unused-import\nfrom tensor2tensor import problems as problems_lib # pylint: disable=unused-import\nfrom tensor2tensor.data_generators import problem # pylint: disable=unused-import\n\nfrom tensor2tensor.utils import cloud_mlengine\nfrom tensor2tensor.utils import decoding\nfrom tensor2tensor.utils import flags as t2t_flags # pylint: disable=unused-import\nfrom tensor2tensor.utils import hparams_lib\nfrom tensor2tensor.utils import mlperf_log\nfrom tensor2tensor.utils import registry\nfrom tensor2tensor.utils import trainer_lib\nfrom tensor2tensor.utils import usr_dir\nimport tensorflow as tf\n\nfrom tensorflow.contrib.tpu.python.tpu import tpu_config\n\n\nflags = tf.flags\nFLAGS = flags.FLAGS\n\n# See utils/flags.py for additional command-line flags.\nflags.DEFINE_string(\"t2t_usr_dir\", None,\n \"Path to a Python module that will be imported. The \"\n \"__init__.py file should include the necessary imports. \"\n \"The imported files should contain registrations, \"\n \"e.g. @registry.register_model calls, that will then be \"\n \"available to the t2t-trainer.\")\nflags.DEFINE_integer(\"random_seed\", None, \"Random seed.\")\nflags.DEFINE_integer(\"tpu_num_shards\", 8, \"Number of tpu shards.\")\nflags.DEFINE_string(\"tpu_job_name\", None,\n \"TPU job name. TPUEstimator can auto-infer this but if the \"\n \"configuration is esoteric it should be provided here.\")\nflags.DEFINE_integer(\"iterations_per_loop\", 100,\n \"Number of iterations in a TPU training loop.\")\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU.\")\nflags.DEFINE_bool(\"use_tpu_estimator\", False, \"Whether to use TPUEstimator. \"\n \"This is always enabled when use_tpu is True.\")\nflags.DEFINE_bool(\"xla_compile\", False,\n \"Whether to use XLA to compile model_fn.\")\nflags.DEFINE_integer(\"xla_jit_level\", -1,\n \"GlobalJitLevel to use while compiling the full graph.\")\nflags.DEFINE_integer(\"tpu_infeed_sleep_secs\", None,\n \"How long to sleep the infeed thread.\")\nflags.DEFINE_bool(\"generate_data\", False, \"Generate data before training?\")\nflags.DEFINE_string(\"tmp_dir\", \"/tmp/t2t_datagen\",\n \"Temporary storage directory, used if --generate_data.\")\nflags.DEFINE_bool(\"profile\", False, \"Profile performance?\")\nflags.DEFINE_integer(\"inter_op_parallelism_threads\", 0,\n \"Number of inter_op_parallelism_threads to use for CPU. \"\n \"See TensorFlow config.proto for details.\")\nflags.DEFINE_integer(\"intra_op_parallelism_threads\", 0,\n \"Number of intra_op_parallelism_threads to use for CPU. 
\"\n \"See TensorFlow config.proto for details.\")\n# TODO(lukaszkaiser): resolve memory and variable assign issues and set to True.\nflags.DEFINE_bool(\n \"optionally_use_dist_strat\", False,\n \"Whether to use TensorFlow DistributionStrategy instead of explicitly \"\n \"replicating the model. DistributionStrategy is used only if the \"\n \"model replication configuration is supported by the DistributionStrategy.\")\n# To maintain compatibility with some internal libs, we guard against these flag\n# definitions possibly erroring. Apologies for the ugliness.\ntry:\n flags.DEFINE_string(\"master\", \"\", \"Address of TensorFlow master.\")\n flags.DEFINE_string(\"output_dir\", \"\", \"Base output directory for run.\")\n flags.DEFINE_string(\"schedule\", \"continuous_train_and_eval\",\n \"Method of Experiment to run.\")\n flags.DEFINE_integer(\"eval_steps\", 100,\n \"Number of steps in evaluation. By default, eval will \"\n \"stop after eval_steps or when it runs through the eval \"\n \"dataset once in full, whichever comes first, so this \"\n \"can be a very large number.\")\nexcept: # pylint: disable=bare-except\n pass\n\nflags.DEFINE_string(\"std_server_protocol\", \"grpc\",\n \"Protocol for tf.train.Server.\")\n\n# Google Cloud TPUs\nflags.DEFINE_string(\"cloud_tpu_name\", \"%s-tpu\" % os.getenv(\"USER\"),\n \"Name of Cloud TPU instance to use or create.\")\n\n# Google Cloud ML Engine\nflags.DEFINE_bool(\"cloud_mlengine\", False,\n \"Whether to launch on Cloud ML Engine.\")\nflags.DEFINE_string(\"cloud_mlengine_master_type\", None,\n \"Machine type for master on Cloud ML Engine. \"\n \"If provided, overrides default selections based on \"\n \"--worker_gpu. User is responsible for ensuring \"\n \"type is valid and that --worker_gpu matches number of \"\n \"GPUs on machine type. See documentation: \"\n \"https://cloud.google.com/ml-engine/reference/rest/v1/\"\n \"projects.jobs#traininginput\")\n# Hyperparameter tuning on Cloud ML Engine\n# Pass an --hparams_range to enable\nflags.DEFINE_string(\"autotune_objective\", None,\n \"TensorBoard metric name to optimize.\")\nflags.DEFINE_bool(\"autotune_maximize\", True,\n \"Whether to maximize (vs. minimize) autotune_objective.\")\nflags.DEFINE_integer(\"autotune_max_trials\", 10,\n \"Maximum number of tuning experiments to run.\")\nflags.DEFINE_integer(\"autotune_parallel_trials\", 1,\n \"How many trials to run in parallel (will spin up this \"\n \"many jobs.\")\n# Note than in open-source TensorFlow, the dash gets converted to an underscore,\n# so access is FLAGS.job_dir.\nflags.DEFINE_string(\"job-dir\", None,\n \"DO NOT USE. Exists only for Cloud ML Engine to pass in \"\n \"during hyperparameter tuning. Overrides --output_dir.\")\nflags.DEFINE_integer(\"log_step_count_steps\", 100,\n \"Number of local steps after which progress is printed \"\n \"out\")\n\n\n\ndef set_hparams_from_args(args):\n \"\"\"Set hparams overrides from unparsed args list.\"\"\"\n if not args:\n return\n\n hp_prefix = \"--hp_\"\n tf.logging.info(\"Found unparsed command-line arguments. 
Checking if any \"\n \"start with %s and interpreting those as hparams \"\n \"settings.\", hp_prefix)\n\n pairs = []\n i = 0\n while i < len(args):\n arg = args[i]\n if arg.startswith(hp_prefix):\n pairs.append((arg[len(hp_prefix):], args[i+1]))\n i += 2\n else:\n tf.logging.warn(\"Found unknown flag: %s\", arg)\n i += 1\n\n as_hparams = \",\".join([\"%s=%s\" % (key, val) for key, val in pairs])\n if FLAGS.hparams:\n as_hparams = \",\" + as_hparams\n FLAGS.hparams += as_hparams\n\n\ndef create_hparams():\n \"\"\"Create hparams.\"\"\"\n if FLAGS.use_tpu and \"tpu\" not in FLAGS.hparams_set:\n tf.logging.warn(\"Not all hyperparameter sets work on TPU. \"\n \"Prefer hparams_sets with a '_tpu' suffix, \"\n \"e.g. transformer_tpu, if available for your model.\")\n hparams_path = os.path.join(FLAGS.output_dir, \"hparams.json\")\n return trainer_lib.create_hparams(FLAGS.hparams_set, FLAGS.hparams,\n hparams_path=hparams_path)\n\n\ndef create_experiment_fn():\n return trainer_lib.create_experiment_fn(\n model_name=FLAGS.model,\n problem_name=FLAGS.problem,\n data_dir=os.path.expanduser(FLAGS.data_dir),\n train_steps=FLAGS.train_steps,\n eval_steps=FLAGS.eval_steps,\n min_eval_frequency=FLAGS.local_eval_frequency,\n schedule=FLAGS.schedule,\n eval_throttle_seconds=FLAGS.eval_throttle_seconds,\n export=FLAGS.export_saved_model,\n decode_hparams=decoding.decode_hparams(FLAGS.decode_hparams),\n use_tfdbg=FLAGS.tfdbg,\n use_dbgprofile=FLAGS.dbgprofile,\n eval_early_stopping_steps=FLAGS.eval_early_stopping_steps,\n eval_early_stopping_metric=FLAGS.eval_early_stopping_metric,\n eval_early_stopping_metric_delta=FLAGS.eval_early_stopping_metric_delta,\n eval_early_stopping_metric_minimize=FLAGS\n .eval_early_stopping_metric_minimize,\n eval_timeout_mins=FLAGS.eval_timeout_mins,\n eval_use_test_set=FLAGS.eval_use_test_set,\n use_tpu=FLAGS.use_tpu,\n use_tpu_estimator=FLAGS.use_tpu_estimator,\n use_xla=FLAGS.xla_compile,\n warm_start_from=FLAGS.warm_start_from,\n decode_from_file=FLAGS.decode_from_file,\n decode_to_file=FLAGS.decode_to_file,\n decode_reference=FLAGS.decode_reference,\n std_server_protocol=FLAGS.std_server_protocol)\n\n\ndef create_run_config(hp, output_dir=None):\n \"\"\"Create a run config.\n\n Args:\n hp: model hyperparameters\n output_dir: model's output directory, defaults to output_dir flag.\n\n Returns:\n a run config\n \"\"\"\n save_ckpt_steps = max(FLAGS.iterations_per_loop, FLAGS.local_eval_frequency)\n save_ckpt_secs = FLAGS.save_checkpoints_secs or None\n if save_ckpt_secs:\n save_ckpt_steps = None\n assert FLAGS.output_dir or FLAGS.checkpoint_path\n tpu_config_extra_kwargs = {}\n if FLAGS.tpu_job_name is not None:\n tpu_config_extra_kwargs[\"tpu_job_name\"] = FLAGS.tpu_job_name\n\n if getattr(hp, \"mtf_mode\", False):\n save_ckpt_steps = None # Disable the default saver\n save_ckpt_secs = None # Disable the default saver\n tpu_config_extra_kwargs = {\n \"num_cores_per_replica\": 1,\n \"per_host_input_for_training\": tpu_config.InputPipelineConfig.BROADCAST,\n }\n\n # the various custom getters we have written do not play well together yet.\n # TODO(noam): ask rsepassi for help here.\n daisy_chain_variables = (\n hp.daisy_chain_variables and\n hp.activation_dtype == \"float32\" and\n hp.weight_dtype == \"float32\")\n return trainer_lib.create_run_config(\n model_name=FLAGS.model,\n model_dir=output_dir or os.path.expanduser(FLAGS.output_dir),\n master=FLAGS.master,\n iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.tpu_num_shards,\n 
log_device_placement=FLAGS.log_device_placement,\n save_checkpoints_steps=save_ckpt_steps,\n save_checkpoints_secs=save_ckpt_secs,\n keep_checkpoint_max=FLAGS.keep_checkpoint_max,\n keep_checkpoint_every_n_hours=FLAGS.keep_checkpoint_every_n_hours,\n num_gpus=FLAGS.worker_gpu,\n gpu_order=FLAGS.gpu_order,\n num_async_replicas=FLAGS.worker_replicas,\n gpu_mem_fraction=FLAGS.worker_gpu_memory_fraction,\n enable_graph_rewriter=FLAGS.enable_graph_rewriter,\n use_tpu=FLAGS.use_tpu,\n use_tpu_estimator=FLAGS.use_tpu_estimator,\n xla_jit_level=FLAGS.xla_jit_level,\n schedule=FLAGS.schedule,\n no_data_parallelism=hp.no_data_parallelism,\n optionally_use_dist_strat=FLAGS.optionally_use_dist_strat,\n daisy_chain_variables=daisy_chain_variables,\n ps_replicas=FLAGS.ps_replicas,\n ps_job=FLAGS.ps_job,\n ps_gpu=FLAGS.ps_gpu,\n sync=FLAGS.sync,\n worker_id=FLAGS.worker_id,\n worker_job=FLAGS.worker_job,\n random_seed=FLAGS.random_seed,\n tpu_infeed_sleep_secs=FLAGS.tpu_infeed_sleep_secs,\n inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads,\n log_step_count_steps=FLAGS.log_step_count_steps,\n intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads,\n tpu_config_extra_kwargs=tpu_config_extra_kwargs,\n cloud_tpu_name=FLAGS.cloud_tpu_name)\n\n\ndef generate_data():\n # Generate data if requested.\n data_dir = os.path.expanduser(FLAGS.data_dir)\n tmp_dir = os.path.expanduser(FLAGS.tmp_dir)\n tf.gfile.MakeDirs(data_dir)\n tf.gfile.MakeDirs(tmp_dir)\n\n problem_name = FLAGS.problem\n tf.logging.info(\"Generating data for %s\" % problem_name)\n registry.problem(problem_name).generate_data(data_dir, tmp_dir)\n\n\[email protected]\ndef profile_context():\n if FLAGS.profile:\n with tf.contrib.tfprof.ProfileContext(\n \"t2tprof\", trace_steps=range(100), dump_steps=range(100)) as pctx:\n opts = tf.profiler.ProfileOptionBuilder.time_and_memory()\n pctx.add_auto_profiling(\"op\", opts, range(100))\n yield\n else:\n yield\n\n\ndef maybe_log_registry_and_exit():\n if FLAGS.registry_help:\n tf.logging.info(registry.help_string())\n sys.exit(0)\n\n\ndef is_chief():\n schedules = [\"train\", \"train_and_evaluate\", \"continuous_train_and_eval\"]\n return FLAGS.worker_id == 0 and FLAGS.schedule in schedules\n\n\ndef save_metadata(hparams):\n \"\"\"Saves FLAGS and hparams to output_dir.\"\"\"\n output_dir = os.path.expanduser(FLAGS.output_dir)\n if not tf.gfile.Exists(output_dir):\n tf.gfile.MakeDirs(output_dir)\n\n # Save FLAGS in txt file\n if hasattr(FLAGS, \"flags_into_string\"):\n flags_str = FLAGS.flags_into_string()\n t2t_flags_str = \"\\n\".join([\n \"--%s=%s\" % (f.name, f.value)\n for f in FLAGS.flags_by_module_dict()[\"tensor2tensor.utils.flags\"]\n ])\n else:\n flags_dict = FLAGS.__dict__[\"__flags\"]\n flags_str = \"\\n\".join(\n [\"--%s=%s\" % (name, str(f)) for (name, f) in flags_dict.items()])\n t2t_flags_str = None\n\n flags_txt = os.path.join(output_dir, \"flags.txt\")\n with tf.gfile.Open(flags_txt, \"w\") as f:\n f.write(flags_str)\n\n if t2t_flags_str:\n t2t_flags_txt = os.path.join(output_dir, \"flags_t2t.txt\")\n with tf.gfile.Open(t2t_flags_txt, \"w\") as f:\n f.write(t2t_flags_str)\n\n # Save hparams as hparams.json\n new_hparams = hparams_lib.copy_hparams(hparams)\n # Modality class is not JSON serializable so remove.\n new_hparams.del_hparam(\"modality\")\n\n hparams_fname = os.path.join(output_dir, \"hparams.json\")\n with tf.gfile.Open(hparams_fname, \"w\") as f:\n f.write(new_hparams.to_json(indent=0, sort_keys=True))\n\n\ndef execute_schedule(exp):\n if not hasattr(exp, 
FLAGS.schedule):\n raise ValueError(\n \"Experiment has no method %s, from --schedule\" % FLAGS.schedule)\n with profile_context():\n getattr(exp, FLAGS.schedule)()\n\n\ndef run_std_server():\n exp = trainer_lib.T2TExperiment(*([None] * 5))\n exp.run_std_server()\n\n\ndef main(argv):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n usr_dir.import_usr_dir(FLAGS.t2t_usr_dir)\n\n # If we just have to print the registry, do that and exit early.\n maybe_log_registry_and_exit()\n\n # Create HParams.\n if argv:\n set_hparams_from_args(argv[1:])\n if FLAGS.schedule != \"run_std_server\":\n hparams = create_hparams()\n\n if FLAGS.schedule == \"train\" or FLAGS.schedule == \"train_eval_and_decode\":\n mlperf_log.transformer_print(key=mlperf_log.RUN_START, hparams=hparams)\n if FLAGS.schedule == \"run_std_server\":\n run_std_server()\n mlperf_log.transformer_print(\n key=mlperf_log.RUN_SET_RANDOM_SEED, value=FLAGS.random_seed,\n hparams=hparams)\n trainer_lib.set_random_seed(FLAGS.random_seed)\n\n if FLAGS.cloud_mlengine:\n cloud_mlengine.launch()\n return\n\n if FLAGS.generate_data:\n generate_data()\n\n if cloud_mlengine.job_dir():\n FLAGS.output_dir = cloud_mlengine.job_dir()\n\n exp_fn = create_experiment_fn()\n exp = exp_fn(create_run_config(hparams), hparams)\n if is_chief():\n save_metadata(hparams)\n execute_schedule(exp)\n if FLAGS.schedule != \"train\":\n mlperf_log.transformer_print(key=mlperf_log.RUN_FINAL,\n hparams=hparams)\n\n\nif __name__ == \"__main__\":\n tf.logging.set_verbosity(tf.logging.INFO)\n tf.app.run()\n",
"path": "tensor2tensor/bin/t2t_trainer.py"
}
] | diff --git a/tensor2tensor/bin/t2t_trainer.py b/tensor2tensor/bin/t2t_trainer.py
index e317f8078..0c76819c6 100644
--- a/tensor2tensor/bin/t2t_trainer.py
+++ b/tensor2tensor/bin/t2t_trainer.py
@@ -372,7 +372,8 @@ def main(argv):
# Create HParams.
if argv:
set_hparams_from_args(argv[1:])
- hparams = create_hparams()
+ if FLAGS.schedule != "run_std_server":
+ hparams = create_hparams()
if FLAGS.schedule == "train" or FLAGS.schedule == "train_eval_and_decode":
mlperf_log.transformer_print(key=mlperf_log.RUN_START, hparams=hparams)
 | distributed training on multiple machines fails
### Description
I am trying to do distributed training on multiple machines with 1 GPU each. It is failing on the workers. Please look into this!
...
### Environment information
```
OS: Linux - 18
On the master I run:
t2t-trainer --master=grpc://10.10.1.2:2219 --ps_replicas=3 --worker_replicas=1 --worker_gpu=0 --worker_id=0 --ps_gpu=1 --sync --schedule=train --worker_job='/job:master' --model=transformer --hparams_set=transformer_base --problem=translate_ende_wmt32k --data_dir=/users/kshiteej/varunimagenet/tensor2tensor/t2t_data/ --output_dir=/users/kshiteej/
On the PS nodes I run:
1. t2t-trainer --schedule=run_std_server
2. t2t-trainer --schedule=run_std_server
3. t2t-trainer --schedule=run_std_server
OUTPUT of Master -
..
.
.
.
13] Done calling model_fn.
INFO:tensorflow:Create CheckpointSaverHook.
I0331 22:40:02.157696 139967148951360 basic_session_run_hooks.py:527] Create CheckpointSaverHook.
INFO:tensorflow:Graph was finalized.
OUTPUT of Worker -
Traceback (most recent call last):
File "/usr/local/bin/t2t-trainer", line 33, in <module>
tf.app.run()
File "/usr/local/lib/python3.6/dist-packages/tensorflow/python/platform/app.py", line 125, in run
_sys.exit(main(argv))
File "/usr/local/bin/t2t-trainer", line 28, in main
t2t_trainer.main(argv)
File "/usr/local/lib/python3.6/dist-packages/tensor2tensor/bin/t2t_trainer.py", line 413, in main
hparams = create_hparams()
File "/usr/local/lib/python3.6/dist-packages/tensor2tensor/bin/t2t_trainer.py", line 176, in create_hparams
return trainer_lib.create_hparams(FLAGS.hparams_set, FLAGS.hparams,hparams_path=hparams_path)
File "/usr/local/lib/python3.6/dist-packages/tensor2tensor/utils/hparams_lib.py", line 48, in create_hparams
hparams = registry.hparams(hparams_set)
File "/usr/local/lib/python3.6/dist-packages/tensor2tensor/utils/registry.py", line 254, in __getitem__
(key, self.name, display_list_by_prefix(sorted(self), 4)))
KeyError: 'None never registered with registry hparams. Available:\n adaptive:\n * adaptive_universal_transformer_base\n * adaptive_universal_tr...
..
..
..
$ pip freeze | grep tensor
mesh-tensorflow==0.0.5
tensor2tensor==1.13.1
tensorboard==1.13.0
tensorflow-datasets==1.0.1
tensorflow-estimator==1.13.0
tensorflow-gpu==1.13.1
tensorflow-metadata==0.13.0
tensorflow-probability==0.6.0
tensorflow-tensorboard==0.4.0
$ python -V
Python 2.7.15rc1
```
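The traceback above shows the PS processes failing inside `create_hparams()`: with `--schedule=run_std_server` no `--hparams_set` is passed, so `registry.hparams(None)` raises a `KeyError`. The accompanying diff guards hparams creation so std-server workers skip it. Below is a minimal, excerpt-style sketch of the patched portion of `main()`; `FLAGS`, `set_hparams_from_args()`, `create_hparams()` and `run_std_server()` are the names defined in the `t2t_trainer.py` shown above, and the surrounding function body is elided.
```
# Sketch of the guarded section of main() after the patch.
if argv:
  set_hparams_from_args(argv[1:])

# PS processes launched with --schedule=run_std_server pass no
# --hparams_set, so hparams creation must be skipped for them.
if FLAGS.schedule != "run_std_server":
  hparams = create_hparams()

if FLAGS.schedule == "run_std_server":
  run_std_server()
```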
|
mindsdb__lightwood-868 | [
{
"content": "# TODO: _add_implicit_values unit test ensures NO changes for a fully specified file.\nfrom copy import deepcopy\nfrom lightwood.helpers.templating import call, inline_dict, align\nfrom lightwood.api import dtype\nfrom lightwood.api.types import (\n JsonAI,\n TypeInformation,\n StatisticalAnalysis,\n ProblemDefinition,\n)\nimport inspect\nfrom lightwood.helpers.log import log\n\n\n# For custom modules, we create a module loader with necessary imports below\nIMPORT_EXTERNAL_DIRS = \"\"\"\nfor import_dir in [os.path.join(os.path.expanduser('~/lightwood_modules'), lightwood_version.replace('.', '_')), os.path.join('/etc/lightwood_modules', lightwood_version.replace('.', '_'))]:\n if os.path.exists(import_dir) and os.access(import_dir, os.R_OK):\n for file_name in list(os.walk(import_dir))[0][2]:\n if file_name[-3:] != '.py':\n continue\n mod_name = file_name[:-3]\n loader = importlib.machinery.SourceFileLoader(mod_name,\n os.path.join(import_dir, file_name))\n module = ModuleType(loader.name)\n loader.exec_module(module)\n sys.modules[mod_name] = module\n exec(f'import {mod_name}')\n\"\"\" # noqa\n\nIMPORTS = \"\"\"\nimport lightwood\nfrom lightwood import __version__ as lightwood_version\nfrom lightwood.analysis import *\nfrom lightwood.api import *\nfrom lightwood.data import *\nfrom lightwood.encoder import *\nfrom lightwood.ensemble import *\nfrom lightwood.helpers.device import *\nfrom lightwood.helpers.general import *\nfrom lightwood.helpers.log import *\nfrom lightwood.helpers.numeric import *\nfrom lightwood.helpers.imputers import *\nfrom lightwood.helpers.parallelism import *\nfrom lightwood.helpers.seed import *\nfrom lightwood.helpers.text import *\nfrom lightwood.helpers.torch import *\nfrom lightwood.mixer import *\nimport pandas as pd\nfrom typing import Dict, List, Union\nimport os\nfrom types import ModuleType\nimport importlib.machinery\nimport sys\nimport time\n\"\"\"\n\n\ndef lookup_encoder(\n col_dtype: str,\n col_name: str,\n is_target: bool,\n problem_defintion: ProblemDefinition,\n is_target_predicting_encoder: bool,\n statistical_analysis: StatisticalAnalysis,\n):\n \"\"\"\n Assign a default encoder for a given column based on its data type, and whether it is a target. Encoders intake raw (but cleaned) data and return an feature representation. This function assigns, per data type, what the featurizer should be. This function runs on each column within the dataset available for model building to assign how it should be featurized.\n\n Users may override to create a custom encoder to enable their own featurization process. However, in order to generate template JSON-AI, this code runs automatically. Users may edit the generated syntax and use custom approaches while model building.\n\n For each encoder, \"args\" may be passed. These args depend an encoder requires during its preparation call.\n\n :param col_dtype: A data-type of a column specified\n :param col_name: The name of the column\n :param is_target: Whether the column is the target for prediction. 
If true, only certain possible feature representations are allowed, particularly for complex data types.\n :param problem_definition: The ``ProblemDefinition`` criteria; this populates specifics on how models and encoders may be trained.\n :param is_target_predicting_encoder:\n \"\"\" # noqa\n\n tss = problem_defintion.timeseries_settings\n encoder_lookup = {\n dtype.integer: \"NumericEncoder\",\n dtype.float: \"NumericEncoder\",\n dtype.binary: \"BinaryEncoder\",\n dtype.categorical: \"CategoricalAutoEncoder\"\n if statistical_analysis is None\n or len(statistical_analysis.histograms[col_name]) > 100\n else \"OneHotEncoder\",\n dtype.tags: \"MultiHotEncoder\",\n dtype.date: \"DatetimeEncoder\",\n dtype.datetime: \"DatetimeEncoder\",\n dtype.image: \"Img2VecEncoder\",\n dtype.rich_text: \"PretrainedLangEncoder\",\n dtype.short_text: \"CategoricalAutoEncoder\",\n dtype.quantity: \"NumericEncoder\",\n dtype.audio: \"MFCCEncoder\",\n dtype.num_array: \"NumArrayEncoder\",\n dtype.cat_array: \"CatArrayEncoder\",\n dtype.num_tsarray: \"TimeSeriesEncoder\",\n dtype.cat_tsarray: \"TimeSeriesEncoder\",\n }\n\n # If column is a target, only specific feature representations are allowed that enable supervised tasks\n target_encoder_lookup_override = {\n dtype.rich_text: \"VocabularyEncoder\",\n dtype.categorical: \"OneHotEncoder\",\n }\n\n # Assign a default encoder to each column.\n encoder_dict = {\"module\": encoder_lookup[col_dtype], \"args\": {}}\n\n # If the column is a target, ensure that the feature representation can enable supervised tasks\n if is_target:\n encoder_dict[\"args\"] = {\"is_target\": \"True\"}\n\n if col_dtype in target_encoder_lookup_override:\n encoder_dict[\"module\"] = target_encoder_lookup_override[col_dtype]\n\n if col_dtype in (dtype.categorical, dtype.binary):\n if problem_defintion.unbias_target:\n encoder_dict[\"args\"][\n \"target_weights\"\n ] = \"$statistical_analysis.target_weights\"\n if problem_defintion.target_weights is not None:\n encoder_dict[\"args\"][\n \"target_weights\"\n ] = problem_defintion.target_weights\n\n if col_dtype in (dtype.integer, dtype.float, dtype.num_array, dtype.num_tsarray):\n encoder_dict[\"args\"][\n \"positive_domain\"\n ] = \"$statistical_analysis.positive_domain\"\n\n # Time-series representations require more advanced flags\n if tss.is_timeseries:\n gby = tss.group_by if tss.group_by is not None else []\n if col_name in tss.order_by:\n encoder_dict[\"module\"] = \"ArrayEncoder\"\n encoder_dict[\"args\"][\"original_type\"] = f'\"{tss.target_type}\"'\n encoder_dict[\"args\"][\"window\"] = f\"{tss.window}\"\n\n if is_target:\n if col_dtype in [dtype.integer]:\n encoder_dict[\"args\"][\"grouped_by\"] = f\"{gby}\"\n encoder_dict[\"module\"] = \"TsNumericEncoder\"\n if col_dtype in [dtype.float]:\n encoder_dict[\"args\"][\"grouped_by\"] = f\"{gby}\"\n encoder_dict[\"module\"] = \"TsNumericEncoder\"\n if tss.horizon > 1:\n encoder_dict[\"args\"][\"grouped_by\"] = f\"{gby}\"\n encoder_dict[\"args\"][\"timesteps\"] = f\"{tss.horizon}\"\n if col_dtype in [dtype.num_tsarray]:\n encoder_dict[\"module\"] = \"TsArrayNumericEncoder\"\n elif col_dtype in [dtype.cat_tsarray]:\n encoder_dict[\"module\"] = \"TsCatArrayEncoder\"\n\n if \"__mdb_ts_previous\" in col_name or col_name in tss.historical_columns:\n encoder_dict[\"module\"] = \"TimeSeriesEncoder\"\n encoder_dict[\"args\"][\"original_type\"] = f'\"{tss.target_type}\"'\n encoder_dict[\"args\"][\"window\"] = f\"{tss.window}\"\n\n # Set arguments for the encoder\n if encoder_dict[\"module\"] == 
\"PretrainedLangEncoder\" and not is_target:\n encoder_dict[\"args\"][\"output_type\"] = \"$dtype_dict[$target]\"\n\n if eval(encoder_dict[\"module\"]).is_trainable_encoder:\n encoder_dict[\"args\"][\"stop_after\"] = \"$problem_definition.seconds_per_encoder\"\n\n if is_target_predicting_encoder:\n encoder_dict[\"args\"][\"embed_mode\"] = \"False\"\n return encoder_dict\n\n\ndef generate_json_ai(\n type_information: TypeInformation,\n statistical_analysis: StatisticalAnalysis,\n problem_definition: ProblemDefinition,\n) -> JsonAI:\n \"\"\"\n Given ``TypeInformation``, ``StatisticalAnalysis``, and the ``ProblemDefinition``, generate a JSON config file with the necessary elements of the ML pipeline populated.\n\n :param TypeInformation: Specifies what data types each column within the dataset are\n :param statistical_analysis:\n :param problem_definition: Specifies details of the model training/building procedure, as defined by ``ProblemDefinition``\n\n :returns: JSON-AI object with fully populated details of the ML pipeline\n \"\"\" # noqaexec\n exec(IMPORTS, globals())\n exec(IMPORT_EXTERNAL_DIRS, globals())\n target = problem_definition.target\n input_cols = []\n tss = problem_definition.timeseries_settings\n dtype_dict = type_information.dtypes\n for k in type_information.identifiers:\n del dtype_dict[k]\n dependency_dict = {}\n\n for col_name, col_dtype in dtype_dict.items():\n if (\n (col_name not in type_information.identifiers\n and col_dtype not in (dtype.invalid, dtype.empty)\n and col_name != target)\n or\n (tss.group_by is not None and col_name in tss.group_by)\n ):\n if col_name != problem_definition.target:\n input_cols.append(col_name)\n\n is_target_predicting_encoder = False\n is_ts = problem_definition.timeseries_settings.is_timeseries\n\n # Single text column classification\n if (\n len(input_cols) == 1\n and type_information.dtypes[input_cols[0]] in (dtype.rich_text)\n and type_information.dtypes[target] in (dtype.categorical, dtype.binary)\n ):\n is_target_predicting_encoder = True\n\n if is_target_predicting_encoder:\n submodels = [\n {\n \"module\": \"Unit\",\n \"args\": {\n \"target_encoder\": \"$encoders[self.target]\",\n \"stop_after\": \"$problem_definition.seconds_per_mixer\",\n },\n }\n ]\n else:\n submodels = [\n {\n \"module\": \"Neural\",\n \"args\": {\n \"fit_on_dev\": True,\n \"stop_after\": \"$problem_definition.seconds_per_mixer\",\n \"search_hyperparameters\": True,\n },\n }\n ]\n\n if (not tss.is_timeseries or tss.horizon == 1) and dtype_dict[target] not in (dtype.num_array, dtype.cat_array):\n submodels.extend(\n [\n {\n \"module\": \"LightGBM\",\n \"args\": {\n \"stop_after\": \"$problem_definition.seconds_per_mixer\",\n \"fit_on_dev\": True,\n },\n },\n {\n \"module\": \"Regression\",\n \"args\": {\n \"stop_after\": \"$problem_definition.seconds_per_mixer\",\n },\n },\n ]\n )\n elif tss.is_timeseries and tss.horizon > 1:\n submodels.extend(\n [\n {\n \"module\": \"LightGBMArray\",\n \"args\": {\n \"fit_on_dev\": True,\n \"stop_after\": \"$problem_definition.seconds_per_mixer\",\n \"n_ts_predictions\": \"$problem_definition.timeseries_settings.horizon\",\n },\n }\n ]\n )\n\n if tss.use_previous_target and dtype_dict[target] in (dtype.integer, dtype.float, dtype.quantity):\n submodels.extend(\n [\n {\n \"module\": \"SkTime\",\n \"args\": {\n \"stop_after\": \"$problem_definition.seconds_per_mixer\",\n \"n_ts_predictions\": \"$problem_definition.timeseries_settings.horizon\",\n },\n }\n ]\n )\n\n model = {\n \"module\": \"BestOf\",\n \"args\": {\n 
\"submodels\": submodels,\n \"args\": \"$pred_args\",\n \"accuracy_functions\": \"$accuracy_functions\",\n \"ts_analysis\": \"self.ts_analysis\" if is_ts else None,\n }\n }\n\n if tss.is_timeseries and tss.horizon > 1:\n if dtype_dict[target] in (dtype.integer, dtype.float, dtype.quantity):\n dtype_dict[target] = dtype.num_tsarray\n else:\n dtype_dict[target] = dtype.cat_tsarray\n\n encoders = {\n target: lookup_encoder(\n dtype_dict[target],\n target,\n True,\n problem_definition,\n False,\n statistical_analysis,\n )\n }\n\n for col in input_cols:\n encoders[col] = lookup_encoder(\n dtype_dict[col],\n col,\n False,\n problem_definition,\n is_target_predicting_encoder,\n statistical_analysis,\n )\n\n # Decide on the accuracy functions to use\n output_dtype = dtype_dict[target]\n if output_dtype in [\n dtype.integer,\n dtype.float,\n dtype.date,\n dtype.datetime,\n dtype.quantity,\n ]:\n accuracy_functions = [\"r2_score\"]\n elif output_dtype in [dtype.categorical, dtype.tags, dtype.binary]:\n accuracy_functions = [\"balanced_accuracy_score\"]\n elif output_dtype in (dtype.num_array, dtype.num_tsarray):\n accuracy_functions = [\"evaluate_num_array_accuracy\"]\n elif output_dtype in (dtype.cat_array, dtype.cat_tsarray):\n accuracy_functions = [\"evaluate_cat_array_accuracy\"]\n else:\n raise Exception(\n f\"Please specify a custom accuracy function for output type {output_dtype}\"\n )\n\n # special dispatch for t+1 time series forecasters\n if is_ts:\n if output_dtype in [dtype.integer, dtype.float]:\n accuracy_functions = [\"evaluate_num_array_accuracy\"]\n\n if problem_definition.time_aim is None:\n # 5 days\n problem_definition.time_aim = 3 * 24 * 3600\n\n # Encoders are assigned 1/3 of the time unless a user overrides this (equal time per encoder)\n if problem_definition.seconds_per_encoder is None:\n nr_trainable_encoders = len(\n [\n x\n for x in encoders.values()\n if eval(x[\"module\"]).is_trainable_encoder\n ]\n )\n if nr_trainable_encoders > 0:\n problem_definition.seconds_per_encoder = 0.33 * problem_definition.time_aim / nr_trainable_encoders\n\n # Mixers are assigned 1/3 of the time aim (or 2/3 if there are no trainable encoders )\\\n # unless a user overrides this (equal time per mixer)\n if problem_definition.seconds_per_mixer is None:\n if problem_definition.seconds_per_encoder is None:\n problem_definition.seconds_per_mixer = 0.66 * problem_definition.time_aim / len(model['args']['submodels'])\n else:\n problem_definition.seconds_per_mixer = 0.33 * problem_definition.time_aim / len(model['args']['submodels'])\n\n return JsonAI(\n cleaner=None,\n splitter=None,\n analyzer=None,\n explainer=None,\n encoders=encoders,\n dtype_dict=dtype_dict,\n dependency_dict=dependency_dict,\n model=model,\n problem_definition=problem_definition,\n identifiers=type_information.identifiers,\n timeseries_transformer=None,\n timeseries_analyzer=None,\n accuracy_functions=accuracy_functions,\n )\n\n\ndef _merge_implicit_values(field: dict, implicit_value: dict) -> dict:\n \"\"\"\n Helper function for `_populate_implicit_field`.\n Takes a user-defined field along with its implicit value, and merges them together.\n\n :param field: JsonAI field with user-defined parameters.\n :param implicit_value: implicit values for the field.\n :return: original field with implicit values merged into it.\n \"\"\"\n exec(IMPORTS, globals())\n exec(IMPORT_EXTERNAL_DIRS, globals())\n module = eval(field[\"module\"])\n\n if inspect.isclass(module):\n args = 
list(inspect.signature(module.__init__).parameters.keys())[1:]\n else:\n args = module.__code__.co_varnames\n\n for arg in args:\n if \"args\" not in field:\n field[\"args\"] = implicit_value[\"args\"]\n else:\n if arg not in field[\"args\"]:\n if arg in implicit_value[\"args\"]:\n field[\"args\"][arg] = implicit_value[\"args\"][arg]\n\n return field\n\n\ndef _populate_implicit_field(\n json_ai: JsonAI, field_name: str, implicit_value: dict, is_timeseries: bool\n) -> None:\n \"\"\"\n Populate the implicit field of the JsonAI, either by filling it in entirely if missing, or by introspecting the class or function and assigning default values to the args in it's signature that are in the implicit default but haven't been populated by the user\n\n :params: json_ai: ``JsonAI`` object that describes the ML pipeline that may not have every detail fully specified.\n :params: field_name: Name of the field the implicit field in ``JsonAI``\n :params: implicit_value: The dictionary containing implicit values for the module and arg in the field\n :params: is_timeseries: Whether or not this is a timeseries problem\n\n :returns: nothing, this method mutates the respective field of the ``JsonAI`` object it receives\n \"\"\" # noqa\n # These imports might be slow, in which case the only <easy> solution is to line this code\n field = json_ai.__getattribute__(field_name)\n if field is None:\n # This if is to only populated timeseries-specific implicit fields for implicit problems\n if is_timeseries or field_name not in (\n \"timeseries_analyzer\",\n \"timeseries_transformer\",\n ):\n field = implicit_value\n\n # If the user specified one or more subfields in a field that's a list\n # Populate them with implicit arguments form the implicit values from that subfield\n elif isinstance(field, list) and isinstance(implicit_value, list):\n for i in range(len(field)):\n sub_field_implicit = [\n x for x in implicit_value if x[\"module\"] == field[i][\"module\"]\n ]\n if len(sub_field_implicit) == 1:\n field[i] = _merge_implicit_values(field[i], sub_field_implicit[0])\n for sub_field_implicit in implicit_value:\n if (\n len([x for x in field if x[\"module\"] == sub_field_implicit[\"module\"]])\n == 0\n ):\n field.append(sub_field_implicit)\n # If the user specified the field, add implicit arguments which we didn't specify\n else:\n field = _merge_implicit_values(field, implicit_value)\n json_ai.__setattr__(field_name, field)\n\n\ndef _add_implicit_values(json_ai: JsonAI) -> JsonAI:\n \"\"\"\n To enable brevity in writing, auto-generate the \"unspecified/missing\" details required in the ML pipeline.\n\n :params: json_ai: ``JsonAI`` object that describes the ML pipeline that may not have every detail fully specified.\n\n :returns: ``JSONAI`` object with all necessary parameters that were previously left unmentioned filled in.\n \"\"\"\n problem_definition = json_ai.problem_definition\n tss = problem_definition.timeseries_settings\n is_ts = tss.is_timeseries\n\n # Add implicit arguments\n # @TODO: Consider removing once we have a proper editor in studio\n mixers = json_ai.model['args']['submodels']\n for i in range(len(mixers)):\n if mixers[i][\"module\"] == \"Unit\":\n pass\n elif mixers[i][\"module\"] == \"Neural\":\n mixers[i][\"args\"][\"target_encoder\"] = mixers[i][\"args\"].get(\n \"target_encoder\", \"$encoders[self.target]\"\n )\n mixers[i][\"args\"][\"target\"] = mixers[i][\"args\"].get(\"target\", \"$target\")\n mixers[i][\"args\"][\"dtype_dict\"] = mixers[i][\"args\"].get(\n \"dtype_dict\", 
\"$dtype_dict\"\n )\n mixers[i][\"args\"][\"timeseries_settings\"] = mixers[i][\"args\"].get(\n \"timeseries_settings\", \"$problem_definition.timeseries_settings\"\n )\n mixers[i][\"args\"][\"net\"] = mixers[i][\"args\"].get(\n \"net\",\n '\"DefaultNet\"'\n if not tss.is_timeseries or not tss.use_previous_target\n else '\"ArNet\"',\n )\n\n elif mixers[i][\"module\"] == \"LightGBM\":\n mixers[i][\"args\"][\"target\"] = mixers[i][\"args\"].get(\"target\", \"$target\")\n mixers[i][\"args\"][\"dtype_dict\"] = mixers[i][\"args\"].get(\n \"dtype_dict\", \"$dtype_dict\"\n )\n mixers[i][\"args\"][\"input_cols\"] = mixers[i][\"args\"].get(\n \"input_cols\", \"$input_cols\"\n )\n mixers[i][\"args\"][\"target_encoder\"] = mixers[i][\"args\"].get(\n \"target_encoder\", \"$encoders[self.target]\"\n )\n mixers[i][\"args\"][\"use_optuna\"] = True\n elif mixers[i][\"module\"] == \"Regression\":\n mixers[i][\"args\"][\"target\"] = mixers[i][\"args\"].get(\"target\", \"$target\")\n mixers[i][\"args\"][\"dtype_dict\"] = mixers[i][\"args\"].get(\n \"dtype_dict\", \"$dtype_dict\"\n )\n mixers[i][\"args\"][\"target_encoder\"] = mixers[i][\"args\"].get(\n \"target_encoder\", \"$encoders[self.target]\"\n )\n elif mixers[i][\"module\"] == \"LightGBMArray\":\n mixers[i][\"args\"][\"target\"] = mixers[i][\"args\"].get(\"target\", \"$target\")\n mixers[i][\"args\"][\"dtype_dict\"] = mixers[i][\"args\"].get(\n \"dtype_dict\", \"$dtype_dict\"\n )\n mixers[i][\"args\"][\"input_cols\"] = mixers[i][\"args\"].get(\n \"input_cols\", \"$input_cols\"\n )\n mixers[i][\"args\"][\"target_encoder\"] = mixers[i][\"args\"].get(\n \"target_encoder\", \"$encoders[self.target]\"\n )\n elif mixers[i][\"module\"] == \"SkTime\":\n mixers[i][\"args\"][\"target\"] = mixers[i][\"args\"].get(\"target\", \"$target\")\n mixers[i][\"args\"][\"dtype_dict\"] = mixers[i][\"args\"].get(\n \"dtype_dict\", \"$dtype_dict\"\n )\n mixers[i][\"args\"][\"ts_analysis\"] = mixers[i][\"args\"].get(\n \"ts_analysis\", \"$ts_analysis\"\n )\n # enforce fit_on_all if this mixer is specified\n problem_definition.fit_on_all = True\n\n json_ai.model[\"args\"][\"target\"] = json_ai.model[\"args\"].get(\"target\", \"$target\")\n json_ai.model[\"args\"][\"data\"] = json_ai.model[\"args\"].get(\"data\", \"encoded_test_data\")\n json_ai.model[\"args\"][\"mixers\"] = json_ai.model[\"args\"].get(\"mixers\", \"$mixers\")\n\n for name in json_ai.encoders:\n if name not in json_ai.dependency_dict:\n json_ai.dependency_dict[name] = []\n\n # Add \"hidden\" fields\n hidden_fields = {\n \"cleaner\": {\n \"module\": \"cleaner\",\n \"args\": {\n \"pct_invalid\": \"$problem_definition.pct_invalid\",\n \"identifiers\": \"$identifiers\",\n \"data\": \"data\",\n \"dtype_dict\": \"$dtype_dict\",\n \"target\": \"$target\",\n \"mode\": \"$mode\",\n \"imputers\": \"$imputers\",\n \"timeseries_settings\": \"$problem_definition.timeseries_settings\",\n \"anomaly_detection\": \"$problem_definition.anomaly_detection\",\n },\n },\n \"splitter\": {\n \"module\": \"splitter\",\n \"args\": {\n \"tss\": \"$problem_definition.timeseries_settings\",\n \"data\": \"data\",\n \"seed\": 1,\n \"target\": \"$target\",\n \"dtype_dict\": \"$dtype_dict\",\n \"pct_train\": 0.8,\n \"pct_dev\": 0.1,\n \"pct_test\": 0.1,\n },\n },\n \"analyzer\": {\n \"module\": \"model_analyzer\",\n \"args\": {\n \"stats_info\": \"$statistical_analysis\",\n \"tss\": \"$problem_definition.timeseries_settings\",\n \"accuracy_functions\": \"$accuracy_functions\",\n \"predictor\": \"$ensemble\",\n \"data\": 
\"encoded_test_data\",\n \"train_data\": \"encoded_train_data\",\n \"target\": \"$target\",\n \"dtype_dict\": \"$dtype_dict\",\n \"analysis_blocks\": \"$analysis_blocks\",\n \"ts_analysis\": \"$ts_analysis\" if is_ts else None,\n },\n },\n \"explainer\": {\n \"module\": \"explain\",\n \"args\": {\n \"timeseries_settings\": \"$problem_definition.timeseries_settings\",\n \"positive_domain\": \"$statistical_analysis.positive_domain\",\n \"anomaly_detection\": \"$problem_definition.anomaly_detection\",\n \"data\": \"data\",\n \"encoded_data\": \"encoded_data\",\n \"predictions\": \"df\",\n \"analysis\": \"$runtime_analyzer\",\n \"ts_analysis\": \"$ts_analysis\" if is_ts else None,\n \"target_name\": \"$target\",\n \"target_dtype\": \"$dtype_dict[self.target]\",\n \"explainer_blocks\": \"$analysis_blocks\",\n \"pred_args\": \"$pred_args\",\n },\n },\n \"analysis_blocks\": [\n {\n \"module\": \"ICP\",\n \"args\": {\n \"fixed_significance\": None,\n \"confidence_normalizer\": False,\n \"positive_domain\": \"$statistical_analysis.positive_domain\",\n },\n },\n {\n \"module\": \"AccStats\",\n \"args\": {\"deps\": [\"ICP\"]},\n },\n {\n \"module\": \"ConfStats\",\n \"args\": {\"deps\": [\"ICP\"]},\n },\n ] if problem_definition.use_default_analysis else [],\n \"timeseries_transformer\": {\n \"module\": \"transform_timeseries\",\n \"args\": {\n \"timeseries_settings\": \"$problem_definition.timeseries_settings\",\n \"data\": \"data\",\n \"dtype_dict\": \"$dtype_dict\",\n \"target\": \"$target\",\n \"mode\": \"$mode\",\n },\n },\n \"timeseries_analyzer\": {\n \"module\": \"timeseries_analyzer\",\n \"args\": {\n \"timeseries_settings\": \"$problem_definition.timeseries_settings\",\n \"data\": \"data\",\n \"dtype_dict\": \"$dtype_dict\",\n \"target\": \"$target\",\n },\n },\n }\n\n for field_name, implicit_value in hidden_fields.items():\n _populate_implicit_field(json_ai, field_name, implicit_value, tss.is_timeseries)\n\n return json_ai\n\n\ndef code_from_json_ai(json_ai: JsonAI) -> str:\n \"\"\"\n Generates a custom ``PredictorInterface`` given the specifications from ``JsonAI`` object.\n\n :param json_ai: ``JsonAI`` object with fully specified parameters\n\n :returns: Automated syntax of the ``PredictorInterface`` object.\n \"\"\"\n json_ai = deepcopy(json_ai)\n # ----------------- #\n # Fill in any missing values\n json_ai = _add_implicit_values(json_ai)\n\n # ----------------- #\n\n # Instantiate data types\n dtype_dict = {}\n\n for k in json_ai.dtype_dict:\n if json_ai.dtype_dict[k] not in (dtype.invalid, dtype.empty):\n dtype_dict[k] = json_ai.dtype_dict[k]\n\n # Populate imputers\n imputer_dict = {}\n if json_ai.imputers:\n for imputer in json_ai.imputers:\n imputer_dict[imputer['args']['target'].replace('\\'', '').replace('\\\"', '')] = call(imputer)\n json_ai.imputers = imputer_dict\n imputers = inline_dict(json_ai.imputers)\n\n # Populate encoders\n encoder_dict = {}\n for col_name, encoder in json_ai.encoders.items():\n encoder_dict[col_name] = call(encoder)\n\n # Populate time-series specific details\n tss = json_ai.problem_definition.timeseries_settings\n if tss.is_timeseries and tss.use_previous_target:\n col_name = f\"__mdb_ts_previous_{json_ai.problem_definition.target}\"\n target_type = json_ai.dtype_dict[json_ai.problem_definition.target]\n json_ai.problem_definition.timeseries_settings.target_type = target_type\n encoder_dict[col_name] = call(\n lookup_encoder(\n target_type,\n col_name,\n False,\n json_ai.problem_definition,\n False,\n None,\n )\n )\n\n dtype_dict[col_name] = 
target_type\n # @TODO: Is populating the json_ai at this stage even necessary?\n json_ai.encoders[col_name] = encoder_dict[col_name]\n json_ai.dtype_dict[col_name] = target_type\n json_ai.dependency_dict[col_name] = []\n\n # ----------------- #\n\n input_cols = [x.replace(\"'\", \"\\\\'\").replace('\"', '\\\\\"') for x in json_ai.encoders\n if x != json_ai.problem_definition.target]\n input_cols = \",\".join([f\"\"\"'{name}'\"\"\" for name in input_cols])\n\n # ----------------- #\n # Time-series specific code blocks\n # ----------------- #\n\n ts_transform_code = \"\"\n ts_analyze_code = None\n ts_encoder_code = \"\"\n if json_ai.timeseries_transformer is not None:\n ts_transform_code = f\"\"\"\nlog.info('Transforming timeseries data')\ndata = {call(json_ai.timeseries_transformer)}\n\"\"\"\n ts_analyze_code = f\"\"\"\nself.ts_analysis = {call(json_ai.timeseries_analyzer)}\n\"\"\"\n # @TODO: set these kwargs/properties in the json ai construction (if possible)\n if json_ai.timeseries_analyzer is not None:\n ts_encoder_code = \"\"\"\nif encoder.is_timeseries_encoder:\n kwargs['ts_analysis'] = self.ts_analysis\n\"\"\"\n\n if json_ai.problem_definition.timeseries_settings.is_timeseries:\n ts_target_code = \"\"\"\nif encoder.is_target:\n encoder.normalizers = self.ts_analysis['target_normalizers']\n encoder.group_combinations = self.ts_analysis['group_combinations']\n\"\"\"\n else:\n ts_target_code = \"\"\n\n # ----------------- #\n # Statistical Analysis Body\n # ----------------- #\n\n analyze_data_body = f\"\"\"\nlog.info(\"Performing statistical analysis on data\")\nself.statistical_analysis = lightwood.data.statistical_analysis(data,\n self.dtype_dict,\n {json_ai.identifiers},\n self.problem_definition)\n\n# Instantiate post-training evaluation\nself.analysis_blocks = [{', '.join([call(block) for block in json_ai.analysis_blocks])}]\n \"\"\"\n\n analyze_data_body = align(analyze_data_body, 2)\n\n # ----------------- #\n # Pre-processing Body\n # ----------------- #\n\n clean_body = f\"\"\"\nlog.info('Cleaning the data')\nself.imputers = {imputers}\ndata = {call(json_ai.cleaner)}\n\n# Time-series blocks\n{ts_transform_code}\n\"\"\"\n if ts_analyze_code is not None:\n clean_body += f\"\"\"\nif self.mode != 'predict':\n{align(ts_analyze_code,1)}\n\"\"\"\n\n clean_body += '\\nreturn data'\n\n clean_body = align(clean_body, 2)\n\n # ----------------- #\n # Train-Test Splitter Body\n # ----------------- #\n\n split_body = f\"\"\"\nlog.info(\"Splitting the data into train/test\")\ntrain_test_data = {call(json_ai.splitter)}\n\nreturn train_test_data\n \"\"\"\n\n split_body = align(split_body, 2)\n\n # ----------------- #\n # Prepare features Body\n # ----------------- #\n\n prepare_body = f\"\"\"\nself.mode = 'train'\n\nif self.statistical_analysis is None:\n raise Exception(\"Please run analyze_data first\")\n\n# Column to encoder mapping\nself.encoders = {inline_dict(encoder_dict)}\n\n# Prepare the training + dev data\nconcatenated_train_dev = pd.concat([data['train'], data['dev']])\n\nlog.info('Preparing the encoders')\n\nencoder_prepping_dict = {{}}\n\n# Prepare encoders that do not require learned strategies\nfor col_name, encoder in self.encoders.items():\n if col_name != self.target and not encoder.is_trainable_encoder:\n encoder_prepping_dict[col_name] = [encoder, concatenated_train_dev[col_name], 'prepare']\n log.info(f'Encoder prepping dict length of: {{len(encoder_prepping_dict)}}')\n\n# Setup parallelization\nparallel_prepped_encoders = mut_method_call(encoder_prepping_dict)\nfor 
col_name, encoder in parallel_prepped_encoders.items():\n self.encoders[col_name] = encoder\n\n# Prepare the target\nif self.target not in parallel_prepped_encoders:\n if self.encoders[self.target].is_trainable_encoder:\n self.encoders[self.target].prepare(data['train'][self.target], data['dev'][self.target])\n else:\n self.encoders[self.target].prepare(pd.concat([data['train'], data['dev']])[self.target])\n\n# Prepare any non-target encoders that are learned\nfor col_name, encoder in self.encoders.items():\n if col_name != self.target and encoder.is_trainable_encoder:\n priming_data = pd.concat([data['train'], data['dev']])\n kwargs = {{}}\n if self.dependencies[col_name]:\n kwargs['dependency_data'] = {{}}\n for col in self.dependencies[col_name]:\n kwargs['dependency_data'][col] = {{\n 'original_type': self.dtype_dict[col],\n 'data': priming_data[col]\n }}\n {align(ts_encoder_code, 3)}\n\n # If an encoder representation requires the target, provide priming data\n if hasattr(encoder, 'uses_target'):\n kwargs['encoded_target_values'] = self.encoders[self.target].encode(priming_data[self.target])\n\n encoder.prepare(data['train'][col_name], data['dev'][col_name], **kwargs)\n\n {align(ts_target_code, 1)}\n\"\"\"\n prepare_body = align(prepare_body, 2)\n\n # ----------------- #\n # Featurize Data Body\n # ----------------- #\n\n feature_body = f\"\"\"\nlog.info('Featurizing the data')\n\nfeature_data = {{ key: EncodedDs(self.encoders, data, self.target) for key, data in split_data.items() if key != \"stratified_on\"}}\n\nreturn feature_data\n\n\"\"\" # noqa\n\n feature_body = align(feature_body, 2)\n\n # ----------------- #\n # Fit Mixer Body\n # ----------------- #\n\n fit_body = f\"\"\"\nself.mode = 'train'\n\n# --------------- #\n# Extract data\n# --------------- #\n# Extract the featurized data into train/dev/test\nencoded_train_data = enc_data['train']\nencoded_dev_data = enc_data['dev']\nencoded_test_data = enc_data['test']\n\nlog.info('Training the mixers')\n\n# --------------- #\n# Fit Models\n# --------------- #\n# Assign list of mixers\nself.mixers = [{', '.join([call(x) for x in json_ai.model[\"args\"][\"submodels\"]])}]\n\n# Train mixers\ntrained_mixers = []\nfor mixer in self.mixers:\n try:\n self.fit_mixer(mixer, encoded_train_data, encoded_dev_data)\n trained_mixers.append(mixer)\n except Exception as e:\n log.warning(f'Exception: {{e}} when training mixer: {{mixer}}')\n if {json_ai.problem_definition.strict_mode} and mixer.stable:\n raise e\n\n# Update mixers to trained versions\nself.mixers = trained_mixers\n\n# --------------- #\n# Create Ensembles\n# --------------- #\nlog.info('Ensembling the mixer')\n# Create an ensemble of mixers to identify best performing model\nself.pred_args = PredictionArguments()\n# Dirty hack\nself.ensemble = {call(json_ai.model)}\nself.supports_proba = self.ensemble.supports_proba\n\"\"\"\n fit_body = align(fit_body, 2)\n\n # ----------------- #\n # Analyze Ensemble Body\n # ----------------- #\n\n analyze_ensemble = f\"\"\"\n\n# --------------- #\n# Extract data\n# --------------- #\n# Extract the featurized data into train/dev/test\nencoded_train_data = enc_data['train']\nencoded_dev_data = enc_data['dev']\nencoded_test_data = enc_data['test']\n\n# --------------- #\n# Analyze Ensembles\n# --------------- #\nlog.info('Analyzing the ensemble of mixers')\nself.model_analysis, self.runtime_analyzer = {call(json_ai.analyzer)}\n\"\"\"\n analyze_ensemble = align(analyze_ensemble, 2)\n\n # ----------------- #\n # Adjust Ensemble Body\n # 
----------------- #\n\n adjust_body = f\"\"\"\nself.mode = 'train'\n\n# --------------- #\n# Prepare data\n# --------------- #\nif old_data is None:\n old_data = pd.DataFrame()\n\nif isinstance(old_data, pd.DataFrame):\n old_data = EncodedDs(self.encoders, old_data, self.target)\n\nif isinstance(new_data, pd.DataFrame):\n new_data = EncodedDs(self.encoders, new_data, self.target)\n\n# --------------- #\n# Update/Adjust Mixers\n# --------------- #\nlog.info('Updating the mixers')\n\nfor mixer in self.mixers:\n mixer.partial_fit(new_data, old_data)\n\"\"\" # noqa\n\n adjust_body = align(adjust_body, 2)\n\n # ----------------- #\n # Learn Body\n # ----------------- #\n\n learn_body = \"\"\"\nself.mode = 'train'\n\n# Perform stats analysis\nself.analyze_data(data)\n\n# Pre-process the data\ndata = self.preprocess(data)\n\n# Create train/test (dev) split\ntrain_dev_test = self.split(data)\n\n# Prepare encoders\nself.prepare(train_dev_test)\n\n# Create feature vectors from data\nenc_train_test = self.featurize(train_dev_test)\n\n# Prepare mixers\nself.fit(enc_train_test)\n\n# Analyze the ensemble\nself.analyze_ensemble(enc_train_test)\n\n# ------------------------ #\n# Enable model partial fit AFTER it is trained and evaluated for performance with the appropriate train/dev/test splits.\n# This assumes the predictor could continuously evolve, hence including reserved testing data may improve predictions.\n# SET `json_ai.problem_definition.fit_on_all=False` TO TURN THIS BLOCK OFF.\n\n# Update the mixers with partial fit\nif self.problem_definition.fit_on_all:\n\n log.info(\"Adjustment on validation requested.\")\n self.adjust(enc_train_test[\"test\"], ConcatedEncodedDs([enc_train_test[\"train\"], enc_train_test[\"dev\"]]))\n\n\"\"\"\n learn_body = align(learn_body, 2)\n # ----------------- #\n # Predict Body\n # ----------------- #\n\n predict_body = f\"\"\"\nself.mode = 'predict'\n\nif len(data) == 0:\n raise Exception(\"Empty input, aborting prediction. 
Please try again with some input data.\")\n\n# Remove columns that user specifies to ignore\nlog.info(f'Dropping features: {{self.problem_definition.ignore_features}}')\ndata = data.drop(columns=self.problem_definition.ignore_features, errors='ignore')\nfor col in self.input_cols:\n if col not in data.columns:\n data[col] = [None] * len(data)\n\n# Pre-process the data\ndata = self.preprocess(data)\n\n# Featurize the data\nencoded_ds = self.featurize({{\"predict_data\": data}})[\"predict_data\"]\nencoded_data = encoded_ds.get_encoded_data(include_target=False)\n\nself.pred_args = PredictionArguments.from_dict(args)\ndf = self.ensemble(encoded_ds, args=self.pred_args)\n\nif self.pred_args.all_mixers:\n return df\nelse:\n insights, global_insights = {call(json_ai.explainer)}\n return insights\n\"\"\"\n\n predict_body = align(predict_body, 2)\n\n predictor_code = f\"\"\"\n{IMPORTS}\n{IMPORT_EXTERNAL_DIRS}\n\nclass Predictor(PredictorInterface):\n target: str\n mixers: List[BaseMixer]\n encoders: Dict[str, BaseEncoder]\n ensemble: BaseEnsemble\n mode: str\n\n def __init__(self):\n seed({json_ai.problem_definition.seed_nr})\n self.target = '{json_ai.problem_definition.target}'\n self.mode = 'inactive'\n self.problem_definition = ProblemDefinition.from_dict({json_ai.problem_definition.to_dict()})\n self.accuracy_functions = {json_ai.accuracy_functions}\n self.identifiers = {json_ai.identifiers}\n self.dtype_dict = {inline_dict(dtype_dict)}\n\n # Any feature-column dependencies\n self.dependencies = {inline_dict(json_ai.dependency_dict)}\n\n self.input_cols = [{input_cols}]\n\n # Initial stats analysis\n self.statistical_analysis = None\n self.runtime_log = dict()\n\n @timed\n def analyze_data(self, data: pd.DataFrame) -> None:\n # Perform a statistical analysis on the unprocessed data\n{analyze_data_body}\n\n @timed\n def preprocess(self, data: pd.DataFrame) -> pd.DataFrame:\n # Preprocess and clean data\n{clean_body}\n\n @timed\n def split(self, data: pd.DataFrame) -> Dict[str, pd.DataFrame]:\n # Split the data into training/testing splits\n{split_body}\n\n @timed\n def prepare(self, data: Dict[str, pd.DataFrame]) -> None:\n # Prepare encoders to featurize data\n{prepare_body}\n\n @timed\n def featurize(self, split_data: Dict[str, pd.DataFrame]):\n # Featurize data into numerical representations for models\n{feature_body}\n\n @timed\n def fit(self, enc_data: Dict[str, pd.DataFrame]) -> None:\n # Fit predictors to estimate target\n{fit_body}\n\n @timed\n def fit_mixer(self, mixer, encoded_train_data, encoded_dev_data) -> None:\n mixer.fit(encoded_train_data, encoded_dev_data)\n\n @timed\n def analyze_ensemble(self, enc_data: Dict[str, pd.DataFrame]) -> None:\n # Evaluate quality of fit for the ensemble of mixers\n{analyze_ensemble}\n\n @timed\n def learn(self, data: pd.DataFrame) -> None:\n log.info(f'Dropping features: {{self.problem_definition.ignore_features}}')\n data = data.drop(columns=self.problem_definition.ignore_features, errors='ignore')\n{learn_body}\n\n @timed\n def adjust(self, new_data: Union[EncodedDs, ConcatedEncodedDs, pd.DataFrame],\n old_data: Optional[Union[EncodedDs, ConcatedEncodedDs, pd.DataFrame]] = None) -> None:\n # Update mixers with new information\n{adjust_body}\n\n @timed\n def predict(self, data: pd.DataFrame, args: Dict = {{}}) -> pd.DataFrame:\n{predict_body}\n\"\"\"\n\n try:\n import black\n except Exception:\n black = None\n\n if black is not None:\n log.info('Unable to import black formatter, predictor code might be a bit ugly.')\n predictor_code = 
black.format_str(predictor_code, mode=black.FileMode())\n\n return predictor_code\n\n\ndef validate_json_ai(json_ai: JsonAI) -> bool:\n \"\"\"\n Checks the validity of a ``JsonAI`` object\n\n :param json_ai: A ``JsonAI`` object\n\n :returns: Whether the JsonAI is valid, i.e. doesn't contain prohibited values, unknown values and can be turned into code.\n \"\"\" # noqa\n from lightwood.api.high_level import predictor_from_code, code_from_json_ai\n\n try:\n predictor_from_code(code_from_json_ai(json_ai))\n return True\n except Exception:\n return False\n",
"path": "lightwood/api/json_ai.py"
}
] | [
{
"content": "# TODO: _add_implicit_values unit test ensures NO changes for a fully specified file.\nfrom copy import deepcopy\nfrom lightwood.helpers.templating import call, inline_dict, align\nfrom lightwood.api import dtype\nfrom lightwood.api.types import (\n JsonAI,\n TypeInformation,\n StatisticalAnalysis,\n ProblemDefinition,\n)\nimport inspect\nfrom lightwood.helpers.log import log\n\n\n# For custom modules, we create a module loader with necessary imports below\nIMPORT_EXTERNAL_DIRS = \"\"\"\nfor import_dir in [os.path.join(os.path.expanduser('~/lightwood_modules'), lightwood_version.replace('.', '_')), os.path.join('/etc/lightwood_modules', lightwood_version.replace('.', '_'))]:\n if os.path.exists(import_dir) and os.access(import_dir, os.R_OK):\n for file_name in list(os.walk(import_dir))[0][2]:\n if file_name[-3:] != '.py':\n continue\n mod_name = file_name[:-3]\n loader = importlib.machinery.SourceFileLoader(mod_name,\n os.path.join(import_dir, file_name))\n module = ModuleType(loader.name)\n loader.exec_module(module)\n sys.modules[mod_name] = module\n exec(f'import {mod_name}')\n\"\"\" # noqa\n\nIMPORTS = \"\"\"\nimport lightwood\nfrom lightwood import __version__ as lightwood_version\nfrom lightwood.analysis import *\nfrom lightwood.api import *\nfrom lightwood.data import *\nfrom lightwood.encoder import *\nfrom lightwood.ensemble import *\nfrom lightwood.helpers.device import *\nfrom lightwood.helpers.general import *\nfrom lightwood.helpers.log import *\nfrom lightwood.helpers.numeric import *\nfrom lightwood.helpers.imputers import *\nfrom lightwood.helpers.parallelism import *\nfrom lightwood.helpers.seed import *\nfrom lightwood.helpers.text import *\nfrom lightwood.helpers.torch import *\nfrom lightwood.mixer import *\nimport pandas as pd\nfrom typing import Dict, List, Union\nimport os\nfrom types import ModuleType\nimport importlib.machinery\nimport sys\nimport time\n\"\"\"\n\n\ndef lookup_encoder(\n col_dtype: str,\n col_name: str,\n is_target: bool,\n problem_defintion: ProblemDefinition,\n is_target_predicting_encoder: bool,\n statistical_analysis: StatisticalAnalysis,\n):\n \"\"\"\n Assign a default encoder for a given column based on its data type, and whether it is a target. Encoders intake raw (but cleaned) data and return an feature representation. This function assigns, per data type, what the featurizer should be. This function runs on each column within the dataset available for model building to assign how it should be featurized.\n\n Users may override to create a custom encoder to enable their own featurization process. However, in order to generate template JSON-AI, this code runs automatically. Users may edit the generated syntax and use custom approaches while model building.\n\n For each encoder, \"args\" may be passed. These args depend an encoder requires during its preparation call.\n\n :param col_dtype: A data-type of a column specified\n :param col_name: The name of the column\n :param is_target: Whether the column is the target for prediction. 
If true, only certain possible feature representations are allowed, particularly for complex data types.\n :param problem_definition: The ``ProblemDefinition`` criteria; this populates specifics on how models and encoders may be trained.\n :param is_target_predicting_encoder:\n \"\"\" # noqa\n\n tss = problem_defintion.timeseries_settings\n encoder_lookup = {\n dtype.integer: \"NumericEncoder\",\n dtype.float: \"NumericEncoder\",\n dtype.binary: \"BinaryEncoder\",\n dtype.categorical: \"CategoricalAutoEncoder\"\n if statistical_analysis is None\n or len(statistical_analysis.histograms[col_name]) > 100\n else \"OneHotEncoder\",\n dtype.tags: \"MultiHotEncoder\",\n dtype.date: \"DatetimeEncoder\",\n dtype.datetime: \"DatetimeEncoder\",\n dtype.image: \"Img2VecEncoder\",\n dtype.rich_text: \"PretrainedLangEncoder\",\n dtype.short_text: \"CategoricalAutoEncoder\",\n dtype.quantity: \"NumericEncoder\",\n dtype.audio: \"MFCCEncoder\",\n dtype.num_array: \"NumArrayEncoder\",\n dtype.cat_array: \"CatArrayEncoder\",\n dtype.num_tsarray: \"TimeSeriesEncoder\",\n dtype.cat_tsarray: \"TimeSeriesEncoder\",\n }\n\n # If column is a target, only specific feature representations are allowed that enable supervised tasks\n target_encoder_lookup_override = {\n dtype.rich_text: \"VocabularyEncoder\",\n dtype.categorical: \"OneHotEncoder\",\n }\n\n # Assign a default encoder to each column.\n encoder_dict = {\"module\": encoder_lookup[col_dtype], \"args\": {}}\n\n # If the column is a target, ensure that the feature representation can enable supervised tasks\n if is_target:\n encoder_dict[\"args\"] = {\"is_target\": \"True\"}\n\n if col_dtype in target_encoder_lookup_override:\n encoder_dict[\"module\"] = target_encoder_lookup_override[col_dtype]\n\n if col_dtype in (dtype.categorical, dtype.binary):\n if problem_defintion.unbias_target:\n encoder_dict[\"args\"][\n \"target_weights\"\n ] = \"$statistical_analysis.target_weights\"\n if problem_defintion.target_weights is not None:\n encoder_dict[\"args\"][\n \"target_weights\"\n ] = problem_defintion.target_weights\n\n if col_dtype in (dtype.integer, dtype.float, dtype.num_array, dtype.num_tsarray):\n encoder_dict[\"args\"][\n \"positive_domain\"\n ] = \"$statistical_analysis.positive_domain\"\n\n # Time-series representations require more advanced flags\n if tss.is_timeseries:\n gby = tss.group_by if tss.group_by is not None else []\n if col_name in tss.order_by:\n encoder_dict[\"module\"] = \"ArrayEncoder\"\n encoder_dict[\"args\"][\"original_type\"] = f'\"{tss.target_type}\"'\n encoder_dict[\"args\"][\"window\"] = f\"{tss.window}\"\n\n if is_target:\n if col_dtype in [dtype.integer]:\n encoder_dict[\"args\"][\"grouped_by\"] = f\"{gby}\"\n encoder_dict[\"module\"] = \"TsNumericEncoder\"\n if col_dtype in [dtype.float]:\n encoder_dict[\"args\"][\"grouped_by\"] = f\"{gby}\"\n encoder_dict[\"module\"] = \"TsNumericEncoder\"\n if tss.horizon > 1:\n encoder_dict[\"args\"][\"grouped_by\"] = f\"{gby}\"\n encoder_dict[\"args\"][\"timesteps\"] = f\"{tss.horizon}\"\n if col_dtype in [dtype.num_tsarray]:\n encoder_dict[\"module\"] = \"TsArrayNumericEncoder\"\n elif col_dtype in [dtype.cat_tsarray]:\n encoder_dict[\"module\"] = \"TsCatArrayEncoder\"\n\n if \"__mdb_ts_previous\" in col_name or col_name in tss.historical_columns:\n encoder_dict[\"module\"] = \"TimeSeriesEncoder\"\n encoder_dict[\"args\"][\"original_type\"] = f'\"{tss.target_type}\"'\n encoder_dict[\"args\"][\"window\"] = f\"{tss.window}\"\n\n # Set arguments for the encoder\n if encoder_dict[\"module\"] == 
\"PretrainedLangEncoder\" and not is_target:\n encoder_dict[\"args\"][\"output_type\"] = \"$dtype_dict[$target]\"\n\n if eval(encoder_dict[\"module\"]).is_trainable_encoder:\n encoder_dict[\"args\"][\"stop_after\"] = \"$problem_definition.seconds_per_encoder\"\n\n if is_target_predicting_encoder:\n encoder_dict[\"args\"][\"embed_mode\"] = \"False\"\n return encoder_dict\n\n\ndef generate_json_ai(\n type_information: TypeInformation,\n statistical_analysis: StatisticalAnalysis,\n problem_definition: ProblemDefinition,\n) -> JsonAI:\n \"\"\"\n Given ``TypeInformation``, ``StatisticalAnalysis``, and the ``ProblemDefinition``, generate a JSON config file with the necessary elements of the ML pipeline populated.\n\n :param TypeInformation: Specifies what data types each column within the dataset are\n :param statistical_analysis:\n :param problem_definition: Specifies details of the model training/building procedure, as defined by ``ProblemDefinition``\n\n :returns: JSON-AI object with fully populated details of the ML pipeline\n \"\"\" # noqaexec\n exec(IMPORTS, globals())\n exec(IMPORT_EXTERNAL_DIRS, globals())\n target = problem_definition.target\n input_cols = []\n tss = problem_definition.timeseries_settings\n dtype_dict = type_information.dtypes\n for k in type_information.identifiers:\n if not (tss.is_timeseries and tss.group_by and k in tss.group_by):\n del dtype_dict[k]\n dependency_dict = {}\n\n for col_name, col_dtype in dtype_dict.items():\n if (\n (col_name not in type_information.identifiers\n and col_dtype not in (dtype.invalid, dtype.empty)\n and col_name != target)\n or\n (tss.group_by is not None and col_name in tss.group_by)\n ):\n if col_name != problem_definition.target:\n input_cols.append(col_name)\n\n is_target_predicting_encoder = False\n is_ts = problem_definition.timeseries_settings.is_timeseries\n\n # Single text column classification\n if (\n len(input_cols) == 1\n and type_information.dtypes[input_cols[0]] in (dtype.rich_text)\n and type_information.dtypes[target] in (dtype.categorical, dtype.binary)\n ):\n is_target_predicting_encoder = True\n\n if is_target_predicting_encoder:\n submodels = [\n {\n \"module\": \"Unit\",\n \"args\": {\n \"target_encoder\": \"$encoders[self.target]\",\n \"stop_after\": \"$problem_definition.seconds_per_mixer\",\n },\n }\n ]\n else:\n submodels = [\n {\n \"module\": \"Neural\",\n \"args\": {\n \"fit_on_dev\": True,\n \"stop_after\": \"$problem_definition.seconds_per_mixer\",\n \"search_hyperparameters\": True,\n },\n }\n ]\n\n if (not tss.is_timeseries or tss.horizon == 1) and dtype_dict[target] not in (dtype.num_array, dtype.cat_array):\n submodels.extend(\n [\n {\n \"module\": \"LightGBM\",\n \"args\": {\n \"stop_after\": \"$problem_definition.seconds_per_mixer\",\n \"fit_on_dev\": True,\n },\n },\n {\n \"module\": \"Regression\",\n \"args\": {\n \"stop_after\": \"$problem_definition.seconds_per_mixer\",\n },\n },\n ]\n )\n elif tss.is_timeseries and tss.horizon > 1:\n submodels.extend(\n [\n {\n \"module\": \"LightGBMArray\",\n \"args\": {\n \"fit_on_dev\": True,\n \"stop_after\": \"$problem_definition.seconds_per_mixer\",\n \"n_ts_predictions\": \"$problem_definition.timeseries_settings.horizon\",\n },\n }\n ]\n )\n\n if tss.use_previous_target and dtype_dict[target] in (dtype.integer, dtype.float, dtype.quantity):\n submodels.extend(\n [\n {\n \"module\": \"SkTime\",\n \"args\": {\n \"stop_after\": \"$problem_definition.seconds_per_mixer\",\n \"n_ts_predictions\": \"$problem_definition.timeseries_settings.horizon\",\n },\n }\n 
]\n )\n\n model = {\n \"module\": \"BestOf\",\n \"args\": {\n \"submodels\": submodels,\n \"args\": \"$pred_args\",\n \"accuracy_functions\": \"$accuracy_functions\",\n \"ts_analysis\": \"self.ts_analysis\" if is_ts else None,\n }\n }\n\n if tss.is_timeseries and tss.horizon > 1:\n if dtype_dict[target] in (dtype.integer, dtype.float, dtype.quantity):\n dtype_dict[target] = dtype.num_tsarray\n else:\n dtype_dict[target] = dtype.cat_tsarray\n\n encoders = {\n target: lookup_encoder(\n dtype_dict[target],\n target,\n True,\n problem_definition,\n False,\n statistical_analysis,\n )\n }\n\n for col in input_cols:\n encoders[col] = lookup_encoder(\n dtype_dict[col],\n col,\n False,\n problem_definition,\n is_target_predicting_encoder,\n statistical_analysis,\n )\n\n # Decide on the accuracy functions to use\n output_dtype = dtype_dict[target]\n if output_dtype in [\n dtype.integer,\n dtype.float,\n dtype.date,\n dtype.datetime,\n dtype.quantity,\n ]:\n accuracy_functions = [\"r2_score\"]\n elif output_dtype in [dtype.categorical, dtype.tags, dtype.binary]:\n accuracy_functions = [\"balanced_accuracy_score\"]\n elif output_dtype in (dtype.num_array, dtype.num_tsarray):\n accuracy_functions = [\"evaluate_num_array_accuracy\"]\n elif output_dtype in (dtype.cat_array, dtype.cat_tsarray):\n accuracy_functions = [\"evaluate_cat_array_accuracy\"]\n else:\n raise Exception(\n f\"Please specify a custom accuracy function for output type {output_dtype}\"\n )\n\n # special dispatch for t+1 time series forecasters\n if is_ts:\n if output_dtype in [dtype.integer, dtype.float]:\n accuracy_functions = [\"evaluate_num_array_accuracy\"]\n\n if problem_definition.time_aim is None:\n # 5 days\n problem_definition.time_aim = 3 * 24 * 3600\n\n # Encoders are assigned 1/3 of the time unless a user overrides this (equal time per encoder)\n if problem_definition.seconds_per_encoder is None:\n nr_trainable_encoders = len(\n [\n x\n for x in encoders.values()\n if eval(x[\"module\"]).is_trainable_encoder\n ]\n )\n if nr_trainable_encoders > 0:\n problem_definition.seconds_per_encoder = 0.33 * problem_definition.time_aim / nr_trainable_encoders\n\n # Mixers are assigned 1/3 of the time aim (or 2/3 if there are no trainable encoders )\\\n # unless a user overrides this (equal time per mixer)\n if problem_definition.seconds_per_mixer is None:\n if problem_definition.seconds_per_encoder is None:\n problem_definition.seconds_per_mixer = 0.66 * problem_definition.time_aim / len(model['args']['submodels'])\n else:\n problem_definition.seconds_per_mixer = 0.33 * problem_definition.time_aim / len(model['args']['submodels'])\n\n return JsonAI(\n cleaner=None,\n splitter=None,\n analyzer=None,\n explainer=None,\n encoders=encoders,\n dtype_dict=dtype_dict,\n dependency_dict=dependency_dict,\n model=model,\n problem_definition=problem_definition,\n identifiers=type_information.identifiers,\n timeseries_transformer=None,\n timeseries_analyzer=None,\n accuracy_functions=accuracy_functions,\n )\n\n\ndef _merge_implicit_values(field: dict, implicit_value: dict) -> dict:\n \"\"\"\n Helper function for `_populate_implicit_field`.\n Takes a user-defined field along with its implicit value, and merges them together.\n\n :param field: JsonAI field with user-defined parameters.\n :param implicit_value: implicit values for the field.\n :return: original field with implicit values merged into it.\n \"\"\"\n exec(IMPORTS, globals())\n exec(IMPORT_EXTERNAL_DIRS, globals())\n module = eval(field[\"module\"])\n\n if inspect.isclass(module):\n args 
= list(inspect.signature(module.__init__).parameters.keys())[1:]\n else:\n args = module.__code__.co_varnames\n\n for arg in args:\n if \"args\" not in field:\n field[\"args\"] = implicit_value[\"args\"]\n else:\n if arg not in field[\"args\"]:\n if arg in implicit_value[\"args\"]:\n field[\"args\"][arg] = implicit_value[\"args\"][arg]\n\n return field\n\n\ndef _populate_implicit_field(\n json_ai: JsonAI, field_name: str, implicit_value: dict, is_timeseries: bool\n) -> None:\n \"\"\"\n Populate the implicit field of the JsonAI, either by filling it in entirely if missing, or by introspecting the class or function and assigning default values to the args in it's signature that are in the implicit default but haven't been populated by the user\n\n :params: json_ai: ``JsonAI`` object that describes the ML pipeline that may not have every detail fully specified.\n :params: field_name: Name of the field the implicit field in ``JsonAI``\n :params: implicit_value: The dictionary containing implicit values for the module and arg in the field\n :params: is_timeseries: Whether or not this is a timeseries problem\n\n :returns: nothing, this method mutates the respective field of the ``JsonAI`` object it receives\n \"\"\" # noqa\n # These imports might be slow, in which case the only <easy> solution is to line this code\n field = json_ai.__getattribute__(field_name)\n if field is None:\n # This if is to only populated timeseries-specific implicit fields for implicit problems\n if is_timeseries or field_name not in (\n \"timeseries_analyzer\",\n \"timeseries_transformer\",\n ):\n field = implicit_value\n\n # If the user specified one or more subfields in a field that's a list\n # Populate them with implicit arguments form the implicit values from that subfield\n elif isinstance(field, list) and isinstance(implicit_value, list):\n for i in range(len(field)):\n sub_field_implicit = [\n x for x in implicit_value if x[\"module\"] == field[i][\"module\"]\n ]\n if len(sub_field_implicit) == 1:\n field[i] = _merge_implicit_values(field[i], sub_field_implicit[0])\n for sub_field_implicit in implicit_value:\n if (\n len([x for x in field if x[\"module\"] == sub_field_implicit[\"module\"]])\n == 0\n ):\n field.append(sub_field_implicit)\n # If the user specified the field, add implicit arguments which we didn't specify\n else:\n field = _merge_implicit_values(field, implicit_value)\n json_ai.__setattr__(field_name, field)\n\n\ndef _add_implicit_values(json_ai: JsonAI) -> JsonAI:\n \"\"\"\n To enable brevity in writing, auto-generate the \"unspecified/missing\" details required in the ML pipeline.\n\n :params: json_ai: ``JsonAI`` object that describes the ML pipeline that may not have every detail fully specified.\n\n :returns: ``JSONAI`` object with all necessary parameters that were previously left unmentioned filled in.\n \"\"\"\n problem_definition = json_ai.problem_definition\n tss = problem_definition.timeseries_settings\n is_ts = tss.is_timeseries\n\n # Add implicit arguments\n # @TODO: Consider removing once we have a proper editor in studio\n mixers = json_ai.model['args']['submodels']\n for i in range(len(mixers)):\n if mixers[i][\"module\"] == \"Unit\":\n pass\n elif mixers[i][\"module\"] == \"Neural\":\n mixers[i][\"args\"][\"target_encoder\"] = mixers[i][\"args\"].get(\n \"target_encoder\", \"$encoders[self.target]\"\n )\n mixers[i][\"args\"][\"target\"] = mixers[i][\"args\"].get(\"target\", \"$target\")\n mixers[i][\"args\"][\"dtype_dict\"] = mixers[i][\"args\"].get(\n \"dtype_dict\", 
\"$dtype_dict\"\n )\n mixers[i][\"args\"][\"timeseries_settings\"] = mixers[i][\"args\"].get(\n \"timeseries_settings\", \"$problem_definition.timeseries_settings\"\n )\n mixers[i][\"args\"][\"net\"] = mixers[i][\"args\"].get(\n \"net\",\n '\"DefaultNet\"'\n if not tss.is_timeseries or not tss.use_previous_target\n else '\"ArNet\"',\n )\n\n elif mixers[i][\"module\"] == \"LightGBM\":\n mixers[i][\"args\"][\"target\"] = mixers[i][\"args\"].get(\"target\", \"$target\")\n mixers[i][\"args\"][\"dtype_dict\"] = mixers[i][\"args\"].get(\n \"dtype_dict\", \"$dtype_dict\"\n )\n mixers[i][\"args\"][\"input_cols\"] = mixers[i][\"args\"].get(\n \"input_cols\", \"$input_cols\"\n )\n mixers[i][\"args\"][\"target_encoder\"] = mixers[i][\"args\"].get(\n \"target_encoder\", \"$encoders[self.target]\"\n )\n mixers[i][\"args\"][\"use_optuna\"] = True\n elif mixers[i][\"module\"] == \"Regression\":\n mixers[i][\"args\"][\"target\"] = mixers[i][\"args\"].get(\"target\", \"$target\")\n mixers[i][\"args\"][\"dtype_dict\"] = mixers[i][\"args\"].get(\n \"dtype_dict\", \"$dtype_dict\"\n )\n mixers[i][\"args\"][\"target_encoder\"] = mixers[i][\"args\"].get(\n \"target_encoder\", \"$encoders[self.target]\"\n )\n elif mixers[i][\"module\"] == \"LightGBMArray\":\n mixers[i][\"args\"][\"target\"] = mixers[i][\"args\"].get(\"target\", \"$target\")\n mixers[i][\"args\"][\"dtype_dict\"] = mixers[i][\"args\"].get(\n \"dtype_dict\", \"$dtype_dict\"\n )\n mixers[i][\"args\"][\"input_cols\"] = mixers[i][\"args\"].get(\n \"input_cols\", \"$input_cols\"\n )\n mixers[i][\"args\"][\"target_encoder\"] = mixers[i][\"args\"].get(\n \"target_encoder\", \"$encoders[self.target]\"\n )\n elif mixers[i][\"module\"] == \"SkTime\":\n mixers[i][\"args\"][\"target\"] = mixers[i][\"args\"].get(\"target\", \"$target\")\n mixers[i][\"args\"][\"dtype_dict\"] = mixers[i][\"args\"].get(\n \"dtype_dict\", \"$dtype_dict\"\n )\n mixers[i][\"args\"][\"ts_analysis\"] = mixers[i][\"args\"].get(\n \"ts_analysis\", \"$ts_analysis\"\n )\n # enforce fit_on_all if this mixer is specified\n problem_definition.fit_on_all = True\n\n json_ai.model[\"args\"][\"target\"] = json_ai.model[\"args\"].get(\"target\", \"$target\")\n json_ai.model[\"args\"][\"data\"] = json_ai.model[\"args\"].get(\"data\", \"encoded_test_data\")\n json_ai.model[\"args\"][\"mixers\"] = json_ai.model[\"args\"].get(\"mixers\", \"$mixers\")\n\n for name in json_ai.encoders:\n if name not in json_ai.dependency_dict:\n json_ai.dependency_dict[name] = []\n\n # Add \"hidden\" fields\n hidden_fields = {\n \"cleaner\": {\n \"module\": \"cleaner\",\n \"args\": {\n \"pct_invalid\": \"$problem_definition.pct_invalid\",\n \"identifiers\": \"$identifiers\",\n \"data\": \"data\",\n \"dtype_dict\": \"$dtype_dict\",\n \"target\": \"$target\",\n \"mode\": \"$mode\",\n \"imputers\": \"$imputers\",\n \"timeseries_settings\": \"$problem_definition.timeseries_settings\",\n \"anomaly_detection\": \"$problem_definition.anomaly_detection\",\n },\n },\n \"splitter\": {\n \"module\": \"splitter\",\n \"args\": {\n \"tss\": \"$problem_definition.timeseries_settings\",\n \"data\": \"data\",\n \"seed\": 1,\n \"target\": \"$target\",\n \"dtype_dict\": \"$dtype_dict\",\n \"pct_train\": 0.8,\n \"pct_dev\": 0.1,\n \"pct_test\": 0.1,\n },\n },\n \"analyzer\": {\n \"module\": \"model_analyzer\",\n \"args\": {\n \"stats_info\": \"$statistical_analysis\",\n \"tss\": \"$problem_definition.timeseries_settings\",\n \"accuracy_functions\": \"$accuracy_functions\",\n \"predictor\": \"$ensemble\",\n \"data\": 
\"encoded_test_data\",\n \"train_data\": \"encoded_train_data\",\n \"target\": \"$target\",\n \"dtype_dict\": \"$dtype_dict\",\n \"analysis_blocks\": \"$analysis_blocks\",\n \"ts_analysis\": \"$ts_analysis\" if is_ts else None,\n },\n },\n \"explainer\": {\n \"module\": \"explain\",\n \"args\": {\n \"timeseries_settings\": \"$problem_definition.timeseries_settings\",\n \"positive_domain\": \"$statistical_analysis.positive_domain\",\n \"anomaly_detection\": \"$problem_definition.anomaly_detection\",\n \"data\": \"data\",\n \"encoded_data\": \"encoded_data\",\n \"predictions\": \"df\",\n \"analysis\": \"$runtime_analyzer\",\n \"ts_analysis\": \"$ts_analysis\" if is_ts else None,\n \"target_name\": \"$target\",\n \"target_dtype\": \"$dtype_dict[self.target]\",\n \"explainer_blocks\": \"$analysis_blocks\",\n \"pred_args\": \"$pred_args\",\n },\n },\n \"analysis_blocks\": [\n {\n \"module\": \"ICP\",\n \"args\": {\n \"fixed_significance\": None,\n \"confidence_normalizer\": False,\n \"positive_domain\": \"$statistical_analysis.positive_domain\",\n },\n },\n {\n \"module\": \"AccStats\",\n \"args\": {\"deps\": [\"ICP\"]},\n },\n {\n \"module\": \"ConfStats\",\n \"args\": {\"deps\": [\"ICP\"]},\n },\n ] if problem_definition.use_default_analysis else [],\n \"timeseries_transformer\": {\n \"module\": \"transform_timeseries\",\n \"args\": {\n \"timeseries_settings\": \"$problem_definition.timeseries_settings\",\n \"data\": \"data\",\n \"dtype_dict\": \"$dtype_dict\",\n \"target\": \"$target\",\n \"mode\": \"$mode\",\n },\n },\n \"timeseries_analyzer\": {\n \"module\": \"timeseries_analyzer\",\n \"args\": {\n \"timeseries_settings\": \"$problem_definition.timeseries_settings\",\n \"data\": \"data\",\n \"dtype_dict\": \"$dtype_dict\",\n \"target\": \"$target\",\n },\n },\n }\n\n for field_name, implicit_value in hidden_fields.items():\n _populate_implicit_field(json_ai, field_name, implicit_value, tss.is_timeseries)\n\n return json_ai\n\n\ndef code_from_json_ai(json_ai: JsonAI) -> str:\n \"\"\"\n Generates a custom ``PredictorInterface`` given the specifications from ``JsonAI`` object.\n\n :param json_ai: ``JsonAI`` object with fully specified parameters\n\n :returns: Automated syntax of the ``PredictorInterface`` object.\n \"\"\"\n json_ai = deepcopy(json_ai)\n # ----------------- #\n # Fill in any missing values\n json_ai = _add_implicit_values(json_ai)\n\n # ----------------- #\n\n # Instantiate data types\n dtype_dict = {}\n\n for k in json_ai.dtype_dict:\n if json_ai.dtype_dict[k] not in (dtype.invalid, dtype.empty):\n dtype_dict[k] = json_ai.dtype_dict[k]\n\n # Populate imputers\n imputer_dict = {}\n if json_ai.imputers:\n for imputer in json_ai.imputers:\n imputer_dict[imputer['args']['target'].replace('\\'', '').replace('\\\"', '')] = call(imputer)\n json_ai.imputers = imputer_dict\n imputers = inline_dict(json_ai.imputers)\n\n # Populate encoders\n encoder_dict = {}\n for col_name, encoder in json_ai.encoders.items():\n encoder_dict[col_name] = call(encoder)\n\n # Populate time-series specific details\n tss = json_ai.problem_definition.timeseries_settings\n if tss.is_timeseries and tss.use_previous_target:\n col_name = f\"__mdb_ts_previous_{json_ai.problem_definition.target}\"\n target_type = json_ai.dtype_dict[json_ai.problem_definition.target]\n json_ai.problem_definition.timeseries_settings.target_type = target_type\n encoder_dict[col_name] = call(\n lookup_encoder(\n target_type,\n col_name,\n False,\n json_ai.problem_definition,\n False,\n None,\n )\n )\n\n dtype_dict[col_name] = 
target_type\n # @TODO: Is populating the json_ai at this stage even necessary?\n json_ai.encoders[col_name] = encoder_dict[col_name]\n json_ai.dtype_dict[col_name] = target_type\n json_ai.dependency_dict[col_name] = []\n\n # ----------------- #\n\n input_cols = [x.replace(\"'\", \"\\\\'\").replace('\"', '\\\\\"') for x in json_ai.encoders\n if x != json_ai.problem_definition.target]\n input_cols = \",\".join([f\"\"\"'{name}'\"\"\" for name in input_cols])\n\n # ----------------- #\n # Time-series specific code blocks\n # ----------------- #\n\n ts_transform_code = \"\"\n ts_analyze_code = None\n ts_encoder_code = \"\"\n if json_ai.timeseries_transformer is not None:\n ts_transform_code = f\"\"\"\nlog.info('Transforming timeseries data')\ndata = {call(json_ai.timeseries_transformer)}\n\"\"\"\n ts_analyze_code = f\"\"\"\nself.ts_analysis = {call(json_ai.timeseries_analyzer)}\n\"\"\"\n # @TODO: set these kwargs/properties in the json ai construction (if possible)\n if json_ai.timeseries_analyzer is not None:\n ts_encoder_code = \"\"\"\nif encoder.is_timeseries_encoder:\n kwargs['ts_analysis'] = self.ts_analysis\n\"\"\"\n\n if json_ai.problem_definition.timeseries_settings.is_timeseries:\n ts_target_code = \"\"\"\nif encoder.is_target:\n encoder.normalizers = self.ts_analysis['target_normalizers']\n encoder.group_combinations = self.ts_analysis['group_combinations']\n\"\"\"\n else:\n ts_target_code = \"\"\n\n # ----------------- #\n # Statistical Analysis Body\n # ----------------- #\n\n analyze_data_body = f\"\"\"\nlog.info(\"Performing statistical analysis on data\")\nself.statistical_analysis = lightwood.data.statistical_analysis(data,\n self.dtype_dict,\n {json_ai.identifiers},\n self.problem_definition)\n\n# Instantiate post-training evaluation\nself.analysis_blocks = [{', '.join([call(block) for block in json_ai.analysis_blocks])}]\n \"\"\"\n\n analyze_data_body = align(analyze_data_body, 2)\n\n # ----------------- #\n # Pre-processing Body\n # ----------------- #\n\n clean_body = f\"\"\"\nlog.info('Cleaning the data')\nself.imputers = {imputers}\ndata = {call(json_ai.cleaner)}\n\n# Time-series blocks\n{ts_transform_code}\n\"\"\"\n if ts_analyze_code is not None:\n clean_body += f\"\"\"\nif self.mode != 'predict':\n{align(ts_analyze_code,1)}\n\"\"\"\n\n clean_body += '\\nreturn data'\n\n clean_body = align(clean_body, 2)\n\n # ----------------- #\n # Train-Test Splitter Body\n # ----------------- #\n\n split_body = f\"\"\"\nlog.info(\"Splitting the data into train/test\")\ntrain_test_data = {call(json_ai.splitter)}\n\nreturn train_test_data\n \"\"\"\n\n split_body = align(split_body, 2)\n\n # ----------------- #\n # Prepare features Body\n # ----------------- #\n\n prepare_body = f\"\"\"\nself.mode = 'train'\n\nif self.statistical_analysis is None:\n raise Exception(\"Please run analyze_data first\")\n\n# Column to encoder mapping\nself.encoders = {inline_dict(encoder_dict)}\n\n# Prepare the training + dev data\nconcatenated_train_dev = pd.concat([data['train'], data['dev']])\n\nlog.info('Preparing the encoders')\n\nencoder_prepping_dict = {{}}\n\n# Prepare encoders that do not require learned strategies\nfor col_name, encoder in self.encoders.items():\n if col_name != self.target and not encoder.is_trainable_encoder:\n encoder_prepping_dict[col_name] = [encoder, concatenated_train_dev[col_name], 'prepare']\n log.info(f'Encoder prepping dict length of: {{len(encoder_prepping_dict)}}')\n\n# Setup parallelization\nparallel_prepped_encoders = mut_method_call(encoder_prepping_dict)\nfor 
col_name, encoder in parallel_prepped_encoders.items():\n self.encoders[col_name] = encoder\n\n# Prepare the target\nif self.target not in parallel_prepped_encoders:\n if self.encoders[self.target].is_trainable_encoder:\n self.encoders[self.target].prepare(data['train'][self.target], data['dev'][self.target])\n else:\n self.encoders[self.target].prepare(pd.concat([data['train'], data['dev']])[self.target])\n\n# Prepare any non-target encoders that are learned\nfor col_name, encoder in self.encoders.items():\n if col_name != self.target and encoder.is_trainable_encoder:\n priming_data = pd.concat([data['train'], data['dev']])\n kwargs = {{}}\n if self.dependencies[col_name]:\n kwargs['dependency_data'] = {{}}\n for col in self.dependencies[col_name]:\n kwargs['dependency_data'][col] = {{\n 'original_type': self.dtype_dict[col],\n 'data': priming_data[col]\n }}\n {align(ts_encoder_code, 3)}\n\n # If an encoder representation requires the target, provide priming data\n if hasattr(encoder, 'uses_target'):\n kwargs['encoded_target_values'] = self.encoders[self.target].encode(priming_data[self.target])\n\n encoder.prepare(data['train'][col_name], data['dev'][col_name], **kwargs)\n\n {align(ts_target_code, 1)}\n\"\"\"\n prepare_body = align(prepare_body, 2)\n\n # ----------------- #\n # Featurize Data Body\n # ----------------- #\n\n feature_body = f\"\"\"\nlog.info('Featurizing the data')\n\nfeature_data = {{ key: EncodedDs(self.encoders, data, self.target) for key, data in split_data.items() if key != \"stratified_on\"}}\n\nreturn feature_data\n\n\"\"\" # noqa\n\n feature_body = align(feature_body, 2)\n\n # ----------------- #\n # Fit Mixer Body\n # ----------------- #\n\n fit_body = f\"\"\"\nself.mode = 'train'\n\n# --------------- #\n# Extract data\n# --------------- #\n# Extract the featurized data into train/dev/test\nencoded_train_data = enc_data['train']\nencoded_dev_data = enc_data['dev']\nencoded_test_data = enc_data['test']\n\nlog.info('Training the mixers')\n\n# --------------- #\n# Fit Models\n# --------------- #\n# Assign list of mixers\nself.mixers = [{', '.join([call(x) for x in json_ai.model[\"args\"][\"submodels\"]])}]\n\n# Train mixers\ntrained_mixers = []\nfor mixer in self.mixers:\n try:\n self.fit_mixer(mixer, encoded_train_data, encoded_dev_data)\n trained_mixers.append(mixer)\n except Exception as e:\n log.warning(f'Exception: {{e}} when training mixer: {{mixer}}')\n if {json_ai.problem_definition.strict_mode} and mixer.stable:\n raise e\n\n# Update mixers to trained versions\nself.mixers = trained_mixers\n\n# --------------- #\n# Create Ensembles\n# --------------- #\nlog.info('Ensembling the mixer')\n# Create an ensemble of mixers to identify best performing model\nself.pred_args = PredictionArguments()\n# Dirty hack\nself.ensemble = {call(json_ai.model)}\nself.supports_proba = self.ensemble.supports_proba\n\"\"\"\n fit_body = align(fit_body, 2)\n\n # ----------------- #\n # Analyze Ensemble Body\n # ----------------- #\n\n analyze_ensemble = f\"\"\"\n\n# --------------- #\n# Extract data\n# --------------- #\n# Extract the featurized data into train/dev/test\nencoded_train_data = enc_data['train']\nencoded_dev_data = enc_data['dev']\nencoded_test_data = enc_data['test']\n\n# --------------- #\n# Analyze Ensembles\n# --------------- #\nlog.info('Analyzing the ensemble of mixers')\nself.model_analysis, self.runtime_analyzer = {call(json_ai.analyzer)}\n\"\"\"\n analyze_ensemble = align(analyze_ensemble, 2)\n\n # ----------------- #\n # Adjust Ensemble Body\n # 
----------------- #\n\n adjust_body = f\"\"\"\nself.mode = 'train'\n\n# --------------- #\n# Prepare data\n# --------------- #\nif old_data is None:\n old_data = pd.DataFrame()\n\nif isinstance(old_data, pd.DataFrame):\n old_data = EncodedDs(self.encoders, old_data, self.target)\n\nif isinstance(new_data, pd.DataFrame):\n new_data = EncodedDs(self.encoders, new_data, self.target)\n\n# --------------- #\n# Update/Adjust Mixers\n# --------------- #\nlog.info('Updating the mixers')\n\nfor mixer in self.mixers:\n mixer.partial_fit(new_data, old_data)\n\"\"\" # noqa\n\n adjust_body = align(adjust_body, 2)\n\n # ----------------- #\n # Learn Body\n # ----------------- #\n\n learn_body = \"\"\"\nself.mode = 'train'\n\n# Perform stats analysis\nself.analyze_data(data)\n\n# Pre-process the data\ndata = self.preprocess(data)\n\n# Create train/test (dev) split\ntrain_dev_test = self.split(data)\n\n# Prepare encoders\nself.prepare(train_dev_test)\n\n# Create feature vectors from data\nenc_train_test = self.featurize(train_dev_test)\n\n# Prepare mixers\nself.fit(enc_train_test)\n\n# Analyze the ensemble\nself.analyze_ensemble(enc_train_test)\n\n# ------------------------ #\n# Enable model partial fit AFTER it is trained and evaluated for performance with the appropriate train/dev/test splits.\n# This assumes the predictor could continuously evolve, hence including reserved testing data may improve predictions.\n# SET `json_ai.problem_definition.fit_on_all=False` TO TURN THIS BLOCK OFF.\n\n# Update the mixers with partial fit\nif self.problem_definition.fit_on_all:\n\n log.info(\"Adjustment on validation requested.\")\n self.adjust(enc_train_test[\"test\"], ConcatedEncodedDs([enc_train_test[\"train\"], enc_train_test[\"dev\"]]))\n\n\"\"\"\n learn_body = align(learn_body, 2)\n # ----------------- #\n # Predict Body\n # ----------------- #\n\n predict_body = f\"\"\"\nself.mode = 'predict'\n\nif len(data) == 0:\n raise Exception(\"Empty input, aborting prediction. 
Please try again with some input data.\")\n\n# Remove columns that user specifies to ignore\nlog.info(f'Dropping features: {{self.problem_definition.ignore_features}}')\ndata = data.drop(columns=self.problem_definition.ignore_features, errors='ignore')\nfor col in self.input_cols:\n if col not in data.columns:\n data[col] = [None] * len(data)\n\n# Pre-process the data\ndata = self.preprocess(data)\n\n# Featurize the data\nencoded_ds = self.featurize({{\"predict_data\": data}})[\"predict_data\"]\nencoded_data = encoded_ds.get_encoded_data(include_target=False)\n\nself.pred_args = PredictionArguments.from_dict(args)\ndf = self.ensemble(encoded_ds, args=self.pred_args)\n\nif self.pred_args.all_mixers:\n return df\nelse:\n insights, global_insights = {call(json_ai.explainer)}\n return insights\n\"\"\"\n\n predict_body = align(predict_body, 2)\n\n predictor_code = f\"\"\"\n{IMPORTS}\n{IMPORT_EXTERNAL_DIRS}\n\nclass Predictor(PredictorInterface):\n target: str\n mixers: List[BaseMixer]\n encoders: Dict[str, BaseEncoder]\n ensemble: BaseEnsemble\n mode: str\n\n def __init__(self):\n seed({json_ai.problem_definition.seed_nr})\n self.target = '{json_ai.problem_definition.target}'\n self.mode = 'inactive'\n self.problem_definition = ProblemDefinition.from_dict({json_ai.problem_definition.to_dict()})\n self.accuracy_functions = {json_ai.accuracy_functions}\n self.identifiers = {json_ai.identifiers}\n self.dtype_dict = {inline_dict(dtype_dict)}\n\n # Any feature-column dependencies\n self.dependencies = {inline_dict(json_ai.dependency_dict)}\n\n self.input_cols = [{input_cols}]\n\n # Initial stats analysis\n self.statistical_analysis = None\n self.runtime_log = dict()\n\n @timed\n def analyze_data(self, data: pd.DataFrame) -> None:\n # Perform a statistical analysis on the unprocessed data\n{analyze_data_body}\n\n @timed\n def preprocess(self, data: pd.DataFrame) -> pd.DataFrame:\n # Preprocess and clean data\n{clean_body}\n\n @timed\n def split(self, data: pd.DataFrame) -> Dict[str, pd.DataFrame]:\n # Split the data into training/testing splits\n{split_body}\n\n @timed\n def prepare(self, data: Dict[str, pd.DataFrame]) -> None:\n # Prepare encoders to featurize data\n{prepare_body}\n\n @timed\n def featurize(self, split_data: Dict[str, pd.DataFrame]):\n # Featurize data into numerical representations for models\n{feature_body}\n\n @timed\n def fit(self, enc_data: Dict[str, pd.DataFrame]) -> None:\n # Fit predictors to estimate target\n{fit_body}\n\n @timed\n def fit_mixer(self, mixer, encoded_train_data, encoded_dev_data) -> None:\n mixer.fit(encoded_train_data, encoded_dev_data)\n\n @timed\n def analyze_ensemble(self, enc_data: Dict[str, pd.DataFrame]) -> None:\n # Evaluate quality of fit for the ensemble of mixers\n{analyze_ensemble}\n\n @timed\n def learn(self, data: pd.DataFrame) -> None:\n log.info(f'Dropping features: {{self.problem_definition.ignore_features}}')\n data = data.drop(columns=self.problem_definition.ignore_features, errors='ignore')\n{learn_body}\n\n @timed\n def adjust(self, new_data: Union[EncodedDs, ConcatedEncodedDs, pd.DataFrame],\n old_data: Optional[Union[EncodedDs, ConcatedEncodedDs, pd.DataFrame]] = None) -> None:\n # Update mixers with new information\n{adjust_body}\n\n @timed\n def predict(self, data: pd.DataFrame, args: Dict = {{}}) -> pd.DataFrame:\n{predict_body}\n\"\"\"\n\n try:\n import black\n except Exception:\n black = None\n\n if black is not None:\n log.info('Unable to import black formatter, predictor code might be a bit ugly.')\n predictor_code = 
black.format_str(predictor_code, mode=black.FileMode())\n\n return predictor_code\n\n\ndef validate_json_ai(json_ai: JsonAI) -> bool:\n \"\"\"\n Checks the validity of a ``JsonAI`` object\n\n :param json_ai: A ``JsonAI`` object\n\n :returns: Whether the JsonAI is valid, i.e. doesn't contain prohibited values, unknown values and can be turned into code.\n \"\"\" # noqa\n from lightwood.api.high_level import predictor_from_code, code_from_json_ai\n\n try:\n predictor_from_code(code_from_json_ai(json_ai))\n return True\n except Exception:\n return False\n",
"path": "lightwood/api/json_ai.py"
}
] | diff --git a/lightwood/api/json_ai.py b/lightwood/api/json_ai.py
index 756377e06..5a3783ec4 100644
--- a/lightwood/api/json_ai.py
+++ b/lightwood/api/json_ai.py
@@ -193,7 +193,8 @@ def generate_json_ai(
tss = problem_definition.timeseries_settings
dtype_dict = type_information.dtypes
for k in type_information.identifiers:
- del dtype_dict[k]
+ if not (tss.is_timeseries and tss.group_by and k in tss.group_by):
+ del dtype_dict[k]
dependency_dict = {}
for col_name, col_dtype in dtype_dict.items():
| [TS] error if 'group by' column contains a single value
If the 'group by' column has a single value for all cells, an error appears. It can be replicated with any ts-dataset by adding a filter to the data select query:
```
create predictor p_name from int_name (select * from test_data.ts_dataset where location='good') predict rental_price order by date group by location window 5 horizon 3;
```
The error is:
```
Traceback (most recent call last):
File "./mindsdb/interfaces/model/learn_process.py", line 175, in run_learn
run_fit(predictor_id, df)
File "./mindsdb/utilities/functions.py", line 56, in wrapper
return func(*args, **kwargs)
File "./mindsdb/interfaces/model/learn_process.py", line 148, in run_fit
raise e
File "./mindsdb/interfaces/model/learn_process.py", line 119, in run_fit
predictor.learn(df)
File "/home/maxs/dev/mdb/venv38/sources/lightwood/lightwood/helpers/log.py", line 30, in wrap
result = f(predictor, *args, **kw)
File "/tmp/52931846b2322b65fafeb5782f9a3e9e76650c2aac7cecf516512142146485069.py", line 450, in learn
self.analyze_data(data)
File "/home/maxs/dev/mdb/venv38/sources/lightwood/lightwood/helpers/log.py", line 30, in wrap
result = f(predictor, *args, **kw)
File "/tmp/52931846b2322b65fafeb5782f9a3e9e76650c2aac7cecf516512142146485069.py", line 137, in analyze_data
self.statistical_analysis = lightwood.data.statistical_analysis(
File "/home/maxs/dev/mdb/venv38/sources/lightwood/lightwood/data/statistical_analysis.py", line 120, in statistical_analysis
if dtypes[col] in (dtype.categorical, dtype.binary, dtype.tags):
KeyError: 'location'
```
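For context, the fix in the diff above keeps a time-series group-by column in `dtype_dict` even when type inference flags it as an identifier, which is exactly what happens when the column holds a single value. A minimal, self-contained sketch of that guard is below; `TsSettings` and `prune_identifiers` are illustrative stand-ins, not the real lightwood classes or functions.
```python
# Illustrative sketch of the guard applied in generate_json_ai; the names below
# (TsSettings, prune_identifiers) are stand-ins, not the actual lightwood API.
from dataclasses import dataclass
from typing import Dict, List, Optional


@dataclass
class TsSettings:
    is_timeseries: bool = False
    group_by: Optional[List[str]] = None


def prune_identifiers(dtype_dict: Dict[str, str], identifiers: List[str], tss: TsSettings) -> Dict[str, str]:
    """Drop identifier columns, but keep any column that is still needed for grouping."""
    for col in identifiers:
        keep = tss.is_timeseries and tss.group_by and col in tss.group_by
        if not keep:
            dtype_dict.pop(col, None)
    return dtype_dict


dtypes = {"location": "categorical", "date": "datetime", "rental_price": "float"}
tss = TsSettings(is_timeseries=True, group_by=["location"])
# 'location' is single-valued, so it was detected as an identifier; the guard keeps it,
# avoiding the KeyError('location') seen in the traceback above.
print(prune_identifiers(dtypes, identifiers=["location"], tss=tss))
# {'location': 'categorical', 'date': 'datetime', 'rental_price': 'float'}
```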
|
plone__Products.CMFPlone-2822 | [
{
"content": "# -*- coding: utf-8 -*-\nfrom Acquisition import aq_base\nfrom datetime import datetime\nfrom plone.registry.interfaces import IRegistry\nfrom plone.resource.file import FilesystemFile\nfrom plone.resource.interfaces import IResourceDirectory\nfrom Products.CMFPlone.interfaces import IBundleRegistry\nfrom Products.CMFPlone.interfaces.resources import OVERRIDE_RESOURCE_DIRECTORY_NAME # noqa\nfrom StringIO import StringIO\nfrom zExceptions import NotFound\nfrom zope.component import getUtility\nfrom zope.component import queryUtility\n\nfrom collections import OrderedDict\nimport logging\nimport re\n\n\nPRODUCTION_RESOURCE_DIRECTORY = 'production'\nlogger = logging.getLogger(__name__)\n\n\ndef get_production_resource_directory():\n persistent_directory = queryUtility(IResourceDirectory, name='persistent')\n if persistent_directory is None:\n return ''\n container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n try:\n production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]\n except NotFound:\n return '%s/++unique++1' % PRODUCTION_RESOURCE_DIRECTORY\n if 'timestamp.txt' not in production_folder:\n return '%s/++unique++1' % PRODUCTION_RESOURCE_DIRECTORY\n timestamp = production_folder.readFile('timestamp.txt')\n return '%s/++unique++%s' % (\n PRODUCTION_RESOURCE_DIRECTORY, timestamp)\n\n\ndef get_resource(context, path):\n if path.startswith('++plone++'):\n # ++plone++ resources can be customized, we return their override\n # value if any\n overrides = get_override_directory(context)\n filepath = path[9:]\n if overrides.isFile(filepath):\n return overrides.readFile(filepath)\n\n try:\n resource = context.unrestrictedTraverse(path)\n except NotFound:\n logger.warn(u'Could not find resource {0}. You may have to create it first.'.format(path)) # noqa\n return\n\n if isinstance(resource, FilesystemFile):\n (directory, sep, filename) = path.rpartition('/')\n return context.unrestrictedTraverse(directory).readFile(filename)\n\n # calling the resource may modify the header, i.e. 
the content-type.\n # we do not want this, so keep the original header intact.\n response_before = context.REQUEST.response\n context.REQUEST.response = response_before.__class__()\n if hasattr(aq_base(resource), 'GET'):\n # for FileResource\n result = resource.GET()\n else:\n # any BrowserView\n result = resource()\n context.REQUEST.response = response_before\n return result\n\n\nclass MetaBundleWriter(object):\n\n def __init__(self, context, folder, name):\n self.context = context\n self.folder = folder\n self.name = name\n self.js_resources = OrderedDict()\n self.css_resources = OrderedDict()\n self.registry = getUtility(IRegistry)\n self.bundles = self.registry.collectionOfInterface(\n IBundleRegistry, prefix='plone.bundles', check=False)\n\n def write_js(self):\n\n # default resources\n if self.name == 'default' and self.registry.records.get(\n 'plone.resources/jquery.js'\n ):\n self.js_resources['_jquery'] = get_resource(\n self.context,\n self.registry.records['plone.resources/jquery.js'].value)\n self.js_resources['_requirejs'] = get_resource(\n self.context,\n self.registry.records['plone.resources.requirejs'].value)\n self.js_resources['_configjs'] = get_resource(\n self.context,\n self.registry.records['plone.resources.configjs'].value)\n\n # bundles\n for name, bundle in self.bundles.items():\n self.load_js_bundle(name, bundle)\n\n self._write_out(self.js_resources, '.js')\n\n def load_js_bundle(self, name, bundle, depth=0):\n if depth > 10:\n # recursion detection\n return\n if bundle.merge_with != self.name:\n return\n if bundle.jscompilation:\n if bundle.depends and bundle.depends in self.bundles:\n self.load_js_bundle(\n bundle.depends, self.bundles[bundle.depends], depth + 1)\n if name in self.js_resources:\n return\n resource = get_resource(self.context, bundle.jscompilation)\n if not resource:\n return\n self.js_resources[name] = resource\n\n def _write_out(self, resources, postfix):\n fi = StringIO()\n for bname, script in resources.items():\n fi.write('''\n// Start Bundle: {0}\n{1}\n// End Bundle: {2}\n'''.format(bname, script, bname))\n self.folder.writeFile(self.name + postfix, fi)\n resources.clear()\n\n def load_css_bundle(self, name, bundle, depth=0):\n if depth > 10:\n # recursion detection\n return\n\n if bundle.merge_with != self.name:\n return\n\n if bundle.csscompilation:\n if bundle.depends and bundle.depends in self.bundles:\n self.load_css_bundle(\n bundle.depends, self.bundles[bundle.depends], depth + 1)\n if name in self.css_resources:\n return\n\n css = get_resource(self.context, bundle.csscompilation)\n if not css:\n return\n (path, sep, filename) = bundle.csscompilation.rpartition('/')\n # Process relative urls:\n # we prefix with current resource path any url not starting with\n # '/' or http: or data:\n css = re.sub(\n r'''(url\\(['\"]?(?!['\"]?([a-z]+:|\\/)))''',\n r'\\1%s/' % path,\n css)\n self.css_resources[name] = css\n\n def write_css(self):\n for name, bundle in self.bundles.items():\n self.load_css_bundle(name, bundle)\n\n self._write_out(self.css_resources, '.css')\n\n\ndef get_override_directory(context):\n persistent_directory = queryUtility(IResourceDirectory, name='persistent')\n if persistent_directory is None:\n return\n if OVERRIDE_RESOURCE_DIRECTORY_NAME not in persistent_directory:\n persistent_directory.makeDirectory(OVERRIDE_RESOURCE_DIRECTORY_NAME)\n return persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n\n\ndef combine_bundles(context):\n container = get_override_directory(context)\n if PRODUCTION_RESOURCE_DIRECTORY not 
in container:\n container.makeDirectory(PRODUCTION_RESOURCE_DIRECTORY)\n production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]\n\n # store timestamp\n fi = StringIO()\n fi.write(datetime.now().isoformat())\n production_folder.writeFile('timestamp.txt', fi)\n\n # generate new combined bundles\n default_writer = MetaBundleWriter(\n context, production_folder, 'default')\n default_writer.write_js()\n logged_in_writer = MetaBundleWriter(\n context, production_folder, 'logged-in')\n logged_in_writer.write_js()\n default_writer.write_css()\n logged_in_writer.write_css()\n",
"path": "Products/CMFPlone/resources/browser/combine.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\nfrom Acquisition import aq_base\nfrom datetime import datetime\nfrom plone.registry.interfaces import IRegistry\nfrom plone.resource.file import FilesystemFile\nfrom plone.resource.interfaces import IResourceDirectory\nfrom Products.CMFPlone.interfaces import IBundleRegistry\nfrom Products.CMFPlone.interfaces.resources import OVERRIDE_RESOURCE_DIRECTORY_NAME # noqa\nfrom StringIO import StringIO\nfrom zExceptions import NotFound\nfrom zope.component import getUtility\nfrom zope.component import queryUtility\n\nfrom collections import OrderedDict\nimport logging\nimport re\n\n\nPRODUCTION_RESOURCE_DIRECTORY = 'production'\nlogger = logging.getLogger(__name__)\n\n\ndef get_production_resource_directory():\n persistent_directory = queryUtility(IResourceDirectory, name='persistent')\n if persistent_directory is None:\n return ''\n container = persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n try:\n production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]\n except NotFound:\n return '%s/++unique++1' % PRODUCTION_RESOURCE_DIRECTORY\n if 'timestamp.txt' not in production_folder:\n return '%s/++unique++1' % PRODUCTION_RESOURCE_DIRECTORY\n timestamp = production_folder.readFile('timestamp.txt')\n return '%s/++unique++%s' % (\n PRODUCTION_RESOURCE_DIRECTORY, timestamp)\n\n\ndef get_resource(context, path):\n if path.startswith('++plone++'):\n # ++plone++ resources can be customized, we return their override\n # value if any\n overrides = get_override_directory(context)\n filepath = path[9:]\n if overrides.isFile(filepath):\n return overrides.readFile(filepath)\n\n try:\n resource = context.unrestrictedTraverse(path)\n except NotFound:\n logger.warn(u'Could not find resource {0}. You may have to create it first.'.format(path)) # noqa\n return\n\n if isinstance(resource, FilesystemFile):\n (directory, sep, filename) = path.rpartition('/')\n return context.unrestrictedTraverse(directory).readFile(filename)\n\n # calling the resource may modify the header, i.e. 
the content-type.\n # we do not want this, so keep the original header intact.\n response_before = context.REQUEST.response\n context.REQUEST.response = response_before.__class__()\n if hasattr(aq_base(resource), 'GET'):\n # for FileResource\n result = resource.GET()\n else:\n # any BrowserView\n result = resource()\n context.REQUEST.response = response_before\n return result\n\n\nclass MetaBundleWriter(object):\n\n def __init__(self, context, folder, name):\n self.context = context\n self.folder = folder\n self.name = name\n self.js_resources = OrderedDict()\n self.css_resources = OrderedDict()\n self.registry = getUtility(IRegistry)\n self.bundles = self.registry.collectionOfInterface(\n IBundleRegistry, prefix='plone.bundles', check=False)\n\n def write_js(self):\n\n # default resources\n if self.name == 'default' and self.registry.records.get(\n 'plone.resources/jquery.js'\n ):\n self.js_resources['_jquery'] = get_resource(\n self.context,\n self.registry.records['plone.resources/jquery.js'].value)\n self.js_resources['_requirejs'] = get_resource(\n self.context,\n self.registry.records['plone.resources.requirejs'].value)\n self.js_resources['_configjs'] = get_resource(\n self.context,\n self.registry.records['plone.resources.configjs'].value)\n\n # bundles\n for name, bundle in self.bundles.items():\n self.load_js_bundle(name, bundle)\n\n self._write_out(self.js_resources, '.js')\n\n def load_js_bundle(self, name, bundle, depth=0):\n if depth > 10:\n # recursion detection\n return\n if bundle.merge_with != self.name:\n return\n if bundle.jscompilation:\n if bundle.depends and bundle.depends in self.bundles:\n self.load_js_bundle(\n bundle.depends, self.bundles[bundle.depends], depth + 1)\n if name in self.js_resources:\n return\n resource = get_resource(self.context, bundle.jscompilation)\n if not resource:\n return\n self.js_resources[name] = resource\n\n def _write_out(self, resources, postfix):\n fi = StringIO()\n for bname, script in resources.items():\n fi.write('''\n/* Start Bundle: {0} */\n{1}\n/* End Bundle: {2} */\n'''.format(bname, script, bname))\n self.folder.writeFile(self.name + postfix, fi)\n resources.clear()\n\n def load_css_bundle(self, name, bundle, depth=0):\n if depth > 10:\n # recursion detection\n return\n\n if bundle.merge_with != self.name:\n return\n\n if bundle.csscompilation:\n if bundle.depends and bundle.depends in self.bundles:\n self.load_css_bundle(\n bundle.depends, self.bundles[bundle.depends], depth + 1)\n if name in self.css_resources:\n return\n\n css = get_resource(self.context, bundle.csscompilation)\n if not css:\n return\n (path, sep, filename) = bundle.csscompilation.rpartition('/')\n # Process relative urls:\n # we prefix with current resource path any url not starting with\n # '/' or http: or data:\n css = re.sub(\n r'''(url\\(['\"]?(?!['\"]?([a-z]+:|\\/)))''',\n r'\\1%s/' % path,\n css)\n self.css_resources[name] = css\n\n def write_css(self):\n for name, bundle in self.bundles.items():\n self.load_css_bundle(name, bundle)\n\n self._write_out(self.css_resources, '.css')\n\n\ndef get_override_directory(context):\n persistent_directory = queryUtility(IResourceDirectory, name='persistent')\n if persistent_directory is None:\n return\n if OVERRIDE_RESOURCE_DIRECTORY_NAME not in persistent_directory:\n persistent_directory.makeDirectory(OVERRIDE_RESOURCE_DIRECTORY_NAME)\n return persistent_directory[OVERRIDE_RESOURCE_DIRECTORY_NAME]\n\n\ndef combine_bundles(context):\n container = get_override_directory(context)\n if 
PRODUCTION_RESOURCE_DIRECTORY not in container:\n container.makeDirectory(PRODUCTION_RESOURCE_DIRECTORY)\n production_folder = container[PRODUCTION_RESOURCE_DIRECTORY]\n\n # store timestamp\n fi = StringIO()\n fi.write(datetime.now().isoformat())\n production_folder.writeFile('timestamp.txt', fi)\n\n # generate new combined bundles\n default_writer = MetaBundleWriter(\n context, production_folder, 'default')\n default_writer.write_js()\n logged_in_writer = MetaBundleWriter(\n context, production_folder, 'logged-in')\n logged_in_writer.write_js()\n default_writer.write_css()\n logged_in_writer.write_css()\n",
"path": "Products/CMFPlone/resources/browser/combine.py"
}
] | diff --git a/Products/CMFPlone/resources/browser/combine.py b/Products/CMFPlone/resources/browser/combine.py
index 2a0d20a63e..b00b4a7656 100644
--- a/Products/CMFPlone/resources/browser/combine.py
+++ b/Products/CMFPlone/resources/browser/combine.py
@@ -124,9 +124,9 @@ def _write_out(self, resources, postfix):
fi = StringIO()
for bname, script in resources.items():
fi.write('''
-// Start Bundle: {0}
+/* Start Bundle: {0} */
{1}
-// End Bundle: {2}
+/* End Bundle: {2} */
'''.format(bname, script, bname))
self.folder.writeFile(self.name + postfix, fi)
resources.clear()
diff --git a/news/2820.bugfix b/news/2820.bugfix
new file mode 100644
index 0000000000..ece21fef4e
--- /dev/null
+++ b/news/2820.bugfix
@@ -0,0 +1 @@
+Don't use // as bundle separators in the resource registry's meta bundle generator. It comments the first css construct in every bundle that follows. [fredvd]
\ No newline at end of file
Plone 5.1.4 to 5.1.5 update: resource registry meta bundle generator comments out the first CSS construct of individual bundles
In one of our projects, after upgrading from Plone 5.1.4 to Plone 5.1.5, a very small part of the CSS became broken in plone.app.mosaic layouts. Images inside a tile no longer had a "height: auto" on them. This is normally included in mosaic-styles.css, and the mosaic styles were included in default.css.
We quickly patched the missing statement into our theme file and did a patch release, but the underlying cause remained vague. The problem would only appear on production; running the site locally did not show it, so my attention was pulled to the meta bundle generation. This was modified between 5.1.4 and 5.1.5 in https://github.com/plone/Products.CMFPlone/commit/397918cd39ba0be4e2e150df5f5f2220e6ecc828 by @vangheem
The problematic code is in this part:
https://github.com/plone/Products.CMFPlone/blob/2195c4a43ba100fb2b7973dccb4299dad2de42fe/Products/CMFPlone/resources/browser/combine.py#L123-L132
The individual bundles are separated by comment lines with `// Start Bundle` and `// End Bundle`, but `//` actually comments out the first following CSS construct; more info at https://www.xanthir.com/b4U10
And the mosaic-styles.css individual bundle starts with:
```
// Start Bundle: mosaic-styles
/* Images will never be bigger then a tile */
.mosaic-tile img {
max-width: 100%;
height: auto;
}
```
It even skips over the /* */ comment on the next line and comments out the first {} block it sees.
So that is how only our height: auto; got disabled in production.
This is at the moment only broken in Plone 5.1. In Plone 5.2 the whole bundler was rewritten again, partly because of Python 3 support, and it does not seem to insert these comments. I have swapped the // comments for /* */ comments, and this also solves the problem. I'll create a pull request shortly.
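To make the failure mode concrete: CSS has no line comments, so a `//` marker line merges with the rule that follows it into one bogus selector, while `/* ... */` markers are inert. The snippet below is a small illustration of that separator choice, with a simplified stand-in for the bundle writer (it is not the actual Plone code).
```python
# Illustrative only: mimics the meta bundle separator logic, not the real Plone writer.
from io import StringIO


def write_bundles(resources, safe=True):
    start = "/* Start Bundle: {0} */" if safe else "// Start Bundle: {0}"
    end = "/* End Bundle: {0} */" if safe else "// End Bundle: {0}"
    out = StringIO()
    for name, body in resources.items():
        out.write("\n{0}\n{1}\n{2}\n".format(start.format(name), body, end.format(name)))
    return out.getvalue()


css = ".mosaic-tile img {\n  max-width: 100%;\n  height: auto;\n}"
# With safe=False the browser parses "// Start Bundle: ..." together with the next rule
# as one invalid selector, so max-width/height are silently dropped.
print(write_bundles({"mosaic-styles": css}, safe=False))
# With safe=True the markers are proper CSS comments and the rule survives.
print(write_bundles({"mosaic-styles": css}, safe=True))
```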
|
lightly-ai__lightly-1177 | [
{
"content": "\"\"\" Contrastive Loss Functions \"\"\"\n\n# Copyright (c) 2020. Lightly AG and its affiliates.\n# All Rights Reserved\n\nimport torch\nfrom torch import nn\n\nfrom lightly.loss.memory_bank import MemoryBankModule\nfrom lightly.utils import dist\n\n\nclass NTXentLoss(MemoryBankModule):\n \"\"\"Implementation of the Contrastive Cross Entropy Loss.\n\n This implementation follows the SimCLR[0] paper. If you enable the memory\n bank by setting the `memory_bank_size` value > 0 the loss behaves like\n the one described in the MoCo[1] paper.\n\n - [0] SimCLR, 2020, https://arxiv.org/abs/2002.05709\n - [1] MoCo, 2020, https://arxiv.org/abs/1911.05722\n\n Attributes:\n temperature:\n Scale logits by the inverse of the temperature.\n memory_bank_size:\n Number of negative samples to store in the memory bank.\n Use 0 for SimCLR. For MoCo we typically use numbers like 4096 or 65536.\n gather_distributed:\n If True then negatives from all gpus are gathered before the\n loss calculation. This flag has no effect if memory_bank_size > 0.\n\n Raises:\n ValueError: If abs(temperature) < 1e-8 to prevent divide by zero.\n\n Examples:\n\n >>> # initialize loss function without memory bank\n >>> loss_fn = NTXentLoss(memory_bank_size=0)\n >>>\n >>> # generate two random transforms of images\n >>> t0 = transforms(images)\n >>> t1 = transforms(images)\n >>>\n >>> # feed through SimCLR or MoCo model\n >>> batch = torch.cat((t0, t1), dim=0)\n >>> output = model(batch)\n >>>\n >>> # calculate loss\n >>> loss = loss_fn(output)\n\n \"\"\"\n\n def __init__(\n self,\n temperature: float = 0.5,\n memory_bank_size: int = 0,\n gather_distributed: bool = False,\n ):\n super(NTXentLoss, self).__init__(size=memory_bank_size)\n self.temperature = temperature\n self.gather_distributed = gather_distributed\n self.cross_entropy = nn.CrossEntropyLoss(reduction=\"mean\")\n self.eps = 1e-8\n\n if abs(self.temperature) < self.eps:\n raise ValueError(\n \"Illegal temperature: abs({}) < 1e-8\".format(self.temperature)\n )\n\n def forward(self, out0: torch.Tensor, out1: torch.Tensor):\n \"\"\"Forward pass through Contrastive Cross-Entropy Loss.\n\n If used with a memory bank, the samples from the memory bank are used\n as negative examples. 
Otherwise, within-batch samples are used as\n negative samples.\n\n Args:\n out0:\n Output projections of the first set of transformed images.\n Shape: (batch_size, embedding_size)\n out1:\n Output projections of the second set of transformed images.\n Shape: (batch_size, embedding_size)\n\n Returns:\n Contrastive Cross Entropy Loss value.\n\n \"\"\"\n\n device = out0.device\n batch_size, _ = out0.shape\n\n # normalize the output to length 1\n out0 = nn.functional.normalize(out0, dim=1)\n out1 = nn.functional.normalize(out1, dim=1)\n\n # ask memory bank for negative samples and extend it with out1 if\n # out1 requires a gradient, otherwise keep the same vectors in the\n # memory bank (this allows for keeping the memory bank constant e.g.\n # for evaluating the loss on the test set)\n # out1: shape: (batch_size, embedding_size)\n # negatives: shape: (embedding_size, memory_bank_size)\n out1, negatives = super(NTXentLoss, self).forward(\n out1, update=out0.requires_grad\n )\n\n # We use the cosine similarity, which is a dot product (einsum) here,\n # as all vectors are already normalized to unit length.\n # Notation in einsum: n = batch_size, c = embedding_size and k = memory_bank_size.\n\n if negatives is not None:\n # use negatives from memory bank\n negatives = negatives.to(device)\n\n # sim_pos is of shape (batch_size, 1) and sim_pos[i] denotes the similarity\n # of the i-th sample in the batch to its positive pair\n sim_pos = torch.einsum(\"nc,nc->n\", out0, out1).unsqueeze(-1)\n\n # sim_neg is of shape (batch_size, memory_bank_size) and sim_neg[i,j] denotes the similarity\n # of the i-th sample to the j-th negative sample\n sim_neg = torch.einsum(\"nc,ck->nk\", out0, negatives)\n\n # set the labels to the first \"class\", i.e. sim_pos,\n # so that it is maximized in relation to sim_neg\n logits = torch.cat([sim_pos, sim_neg], dim=1) / self.temperature\n labels = torch.zeros(logits.shape[0], device=device, dtype=torch.long)\n\n else:\n # user other samples from batch as negatives\n # and create diagonal mask that only selects similarities between\n # views of the same image\n if self.gather_distributed and dist.world_size() > 1:\n # gather hidden representations from other processes\n out0_large = torch.cat(dist.gather(out0), 0)\n out1_large = torch.cat(dist.gather(out1), 0)\n diag_mask = dist.eye_rank(batch_size, device=out0.device)\n else:\n # single process\n out0_large = out0\n out1_large = out1\n diag_mask = torch.eye(batch_size, device=out0.device, dtype=torch.bool)\n\n # calculate similiarities\n # here n = batch_size and m = batch_size * world_size\n # the resulting vectors have shape (n, m)\n logits_00 = torch.einsum(\"nc,mc->nm\", out0, out0_large) / self.temperature\n logits_01 = torch.einsum(\"nc,mc->nm\", out0, out1_large) / self.temperature\n logits_10 = torch.einsum(\"nc,mc->nm\", out1, out0_large) / self.temperature\n logits_11 = torch.einsum(\"nc,mc->nm\", out1, out1_large) / self.temperature\n\n # remove simliarities between same views of the same image\n logits_00 = logits_00[~diag_mask].view(batch_size, -1)\n logits_11 = logits_11[~diag_mask].view(batch_size, -1)\n\n # concatenate logits\n # the logits tensor in the end has shape (2*n, 2*m-1)\n logits_0100 = torch.cat([logits_01, logits_00], dim=1)\n logits_1011 = torch.cat([logits_10, logits_11], dim=1)\n logits = torch.cat([logits_0100, logits_1011], dim=0)\n\n # create labels\n labels = torch.arange(batch_size, device=device, dtype=torch.long)\n labels = labels + dist.rank() * batch_size\n labels = 
labels.repeat(2)\n\n loss = self.cross_entropy(logits, labels)\n\n return loss\n",
"path": "lightly/loss/ntx_ent_loss.py"
}
] | [
{
"content": "\"\"\" Contrastive Loss Functions \"\"\"\n\n# Copyright (c) 2020. Lightly AG and its affiliates.\n# All Rights Reserved\n\nimport torch\nfrom torch import nn\n\nfrom lightly.loss.memory_bank import MemoryBankModule\nfrom lightly.utils import dist\n\n\nclass NTXentLoss(MemoryBankModule):\n \"\"\"Implementation of the Contrastive Cross Entropy Loss.\n\n This implementation follows the SimCLR[0] paper. If you enable the memory\n bank by setting the `memory_bank_size` value > 0 the loss behaves like\n the one described in the MoCo[1] paper.\n\n - [0] SimCLR, 2020, https://arxiv.org/abs/2002.05709\n - [1] MoCo, 2020, https://arxiv.org/abs/1911.05722\n\n Attributes:\n temperature:\n Scale logits by the inverse of the temperature.\n memory_bank_size:\n Number of negative samples to store in the memory bank.\n Use 0 for SimCLR. For MoCo we typically use numbers like 4096 or 65536.\n gather_distributed:\n If True then negatives from all gpus are gathered before the\n loss calculation. This flag has no effect if memory_bank_size > 0.\n\n Raises:\n ValueError: If abs(temperature) < 1e-8 to prevent divide by zero.\n\n Examples:\n\n >>> # initialize loss function without memory bank\n >>> loss_fn = NTXentLoss(memory_bank_size=0)\n >>>\n >>> # generate two random transforms of images\n >>> t0 = transforms(images)\n >>> t1 = transforms(images)\n >>>\n >>> # feed through SimCLR or MoCo model\n >>> batch = torch.cat((t0, t1), dim=0)\n >>> output = model(batch)\n >>>\n >>> # calculate loss\n >>> loss = loss_fn(output)\n\n \"\"\"\n\n def __init__(\n self,\n temperature: float = 0.5,\n memory_bank_size: int = 0,\n gather_distributed: bool = False,\n ):\n super(NTXentLoss, self).__init__(size=memory_bank_size)\n self.temperature = temperature\n self.gather_distributed = gather_distributed\n self.cross_entropy = nn.CrossEntropyLoss(reduction=\"mean\")\n self.eps = 1e-8\n\n if abs(self.temperature) < self.eps:\n raise ValueError(\n \"Illegal temperature: abs({}) < 1e-8\".format(self.temperature)\n )\n\n def forward(self, out0: torch.Tensor, out1: torch.Tensor):\n \"\"\"Forward pass through Contrastive Cross-Entropy Loss.\n\n If used with a memory bank, the samples from the memory bank are used\n as negative examples. 
Otherwise, within-batch samples are used as\n negative samples.\n\n Args:\n out0:\n Output projections of the first set of transformed images.\n Shape: (batch_size, embedding_size)\n out1:\n Output projections of the second set of transformed images.\n Shape: (batch_size, embedding_size)\n\n Returns:\n Contrastive Cross Entropy Loss value.\n\n \"\"\"\n\n device = out0.device\n batch_size, _ = out0.shape\n\n # normalize the output to length 1\n out0 = nn.functional.normalize(out0, dim=1)\n out1 = nn.functional.normalize(out1, dim=1)\n\n # ask memory bank for negative samples and extend it with out1 if\n # out1 requires a gradient, otherwise keep the same vectors in the\n # memory bank (this allows for keeping the memory bank constant e.g.\n # for evaluating the loss on the test set)\n # out1: shape: (batch_size, embedding_size)\n # negatives: shape: (embedding_size, memory_bank_size)\n out1, negatives = super(NTXentLoss, self).forward(\n out1, update=out0.requires_grad\n )\n\n # We use the cosine similarity, which is a dot product (einsum) here,\n # as all vectors are already normalized to unit length.\n # Notation in einsum: n = batch_size, c = embedding_size and k = memory_bank_size.\n\n if negatives is not None:\n # use negatives from memory bank\n negatives = negatives.to(device)\n\n # sim_pos is of shape (batch_size, 1) and sim_pos[i] denotes the similarity\n # of the i-th sample in the batch to its positive pair\n sim_pos = torch.einsum(\"nc,nc->n\", out0, out1).unsqueeze(-1)\n\n # sim_neg is of shape (batch_size, memory_bank_size) and sim_neg[i,j] denotes the similarity\n # of the i-th sample to the j-th negative sample\n sim_neg = torch.einsum(\"nc,ck->nk\", out0, negatives)\n\n # set the labels to the first \"class\", i.e. sim_pos,\n # so that it is maximized in relation to sim_neg\n logits = torch.cat([sim_pos, sim_neg], dim=1) / self.temperature\n labels = torch.zeros(logits.shape[0], device=device, dtype=torch.long)\n\n else:\n # user other samples from batch as negatives\n # and create diagonal mask that only selects similarities between\n # views of the same image\n if self.gather_distributed and dist.world_size() > 1:\n # gather hidden representations from other processes\n out0_large = torch.cat(dist.gather(out0), 0)\n out1_large = torch.cat(dist.gather(out1), 0)\n diag_mask = dist.eye_rank(batch_size, device=out0.device)\n else:\n # single process\n out0_large = out0\n out1_large = out1\n diag_mask = torch.eye(batch_size, device=out0.device, dtype=torch.bool)\n\n # calculate similiarities\n # here n = batch_size and m = batch_size * world_size\n # the resulting vectors have shape (n, m)\n logits_00 = torch.einsum(\"nc,mc->nm\", out0, out0_large) / self.temperature\n logits_01 = torch.einsum(\"nc,mc->nm\", out0, out1_large) / self.temperature\n logits_10 = torch.einsum(\"nc,mc->nm\", out1, out0_large) / self.temperature\n logits_11 = torch.einsum(\"nc,mc->nm\", out1, out1_large) / self.temperature\n\n # remove simliarities between same views of the same image\n logits_00 = logits_00[~diag_mask].view(batch_size, -1)\n logits_11 = logits_11[~diag_mask].view(batch_size, -1)\n\n # concatenate logits\n # the logits tensor in the end has shape (2*n, 2*m-1)\n logits_0100 = torch.cat([logits_01, logits_00], dim=1)\n logits_1011 = torch.cat([logits_10, logits_11], dim=1)\n logits = torch.cat([logits_0100, logits_1011], dim=0)\n\n # create labels\n labels = torch.arange(batch_size, device=device, dtype=torch.long)\n if self.gather_distributed:\n labels = labels + dist.rank() * 
batch_size\n labels = labels.repeat(2)\n\n loss = self.cross_entropy(logits, labels)\n\n return loss\n",
"path": "lightly/loss/ntx_ent_loss.py"
}
] | diff --git a/lightly/loss/ntx_ent_loss.py b/lightly/loss/ntx_ent_loss.py
index 05c157f3c..f8ce8ae52 100644
--- a/lightly/loss/ntx_ent_loss.py
+++ b/lightly/loss/ntx_ent_loss.py
@@ -161,7 +161,8 @@ def forward(self, out0: torch.Tensor, out1: torch.Tensor):
# create labels
labels = torch.arange(batch_size, device=device, dtype=torch.long)
- labels = labels + dist.rank() * batch_size
+ if self.gather_distributed:
+ labels = labels + dist.rank() * batch_size
labels = labels.repeat(2)
loss = self.cross_entropy(logits, labels)
| CUDA errors in NTXentLoss with gloo backend in multi-gpu training
I was wondering whether the `gloo` distributed communication backend for multi-GPU training is officially supported by lightly. It seems that, for example, NTXentLoss doesn't work with `gloo` (I'm using PyTorch Lightning): I get CUDA errors even when setting `gather_distributed = False`.
I can fix the issue when using `gather_distributed = False` by replacing the line
https://github.com/lightly-ai/lightly/blob/master/lightly/loss/ntx_ent_loss.py#L164
by
```python
labels = labels + batch_size * (dist.rank() if gather_distributed else 0)
```
but then of course I can't use `gather_distributed = True` anymore.
Using the `nccl` backend everything works fine, but `nccl` does not work well on some of our machines, so unfortunately I'm stuck with `gloo`. I think `gloo` might be too niche a case to fix the problem for `gather_distributed = True`, but maybe it would help to just replace the line above and note in the documentation that `gather_distributed` is not supported with `gloo`?
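For reference, here is a minimal, self-contained sketch of the label construction after the proposed fix (the `contrastive_labels` helper is purely illustrative and not part of lightly's API; the real change sits inside `NTXentLoss.forward`):
```python
import torch

def contrastive_labels(batch_size: int, rank: int, gather_distributed: bool) -> torch.Tensor:
    # Illustrative sketch: positive-pair targets for the (2 * batch_size, ...) logit
    # matrix built in NTXentLoss.forward. The rank offset only makes sense when the
    # logits were computed against embeddings gathered from all processes.
    labels = torch.arange(batch_size, dtype=torch.long)
    if gather_distributed:
        labels = labels + rank * batch_size
    return labels.repeat(2)

# e.g. batch_size=4 on rank 1:
# contrastive_labels(4, 1, True)  -> tensor([4, 5, 6, 7, 4, 5, 6, 7])
# contrastive_labels(4, 1, False) -> tensor([0, 1, 2, 3, 0, 1, 2, 3])
```
Without the offset the targets stay in `[0, batch_size - 1]`, which is always a valid class index for the `(2 * batch_size, 2 * batch_size - 1)` logits; with the old unconditional offset, any rank >= 1 produced out-of-range targets, which is presumably what surfaced as the CUDA device-side asserts.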
|
mars-project__mars-291 | [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis file folds Chinese po files by hacking babel.messages.pofile.normalize\nusing jieba text segment library instead of regex\n\"\"\"\n\nimport datetime\nimport os\n\nfrom babel.messages import pofile\nfrom babel.messages.pofile import escape\n\n\ndef _zh_len(s):\n \"\"\"\n Calculate text length in Chinese\n \"\"\"\n try:\n return len(s.encode('gb2312'))\n except ValueError:\n return len(s)\n\n\ndef _zh_split(s):\n \"\"\"\n Split text length in Chinese\n \"\"\"\n import jieba\n try:\n s.encode('ascii')\n has_zh = False\n except ValueError:\n has_zh = True\n\n if has_zh:\n return list(jieba.cut(s))\n else:\n return pofile.WORD_SEP.split(s)\n\n\n# code modified from babel.messages.pofile (hash 359ecffca479dfe032d0f7210d5cd8160599c816)\ndef _normalize(string, prefix='', width=76):\n r\"\"\"Convert a string into a format that is appropriate for .po files.\n >>> print(normalize('''Say:\n ... \"hello, world!\"\n ... ''', width=None))\n \"\"\n \"Say:\\n\"\n \" \\\"hello, world!\\\"\\n\"\n >>> print(normalize('''Say:\n ... \"Lorem ipsum dolor sit amet, consectetur adipisicing elit, \"\n ... 
''', width=32))\n \"\"\n \"Say:\\n\"\n \" \\\"Lorem ipsum dolor sit \"\n \"amet, consectetur adipisicing\"\n \" elit, \\\"\\n\"\n :param string: the string to normalize\n :param prefix: a string that should be prepended to every line\n :param width: the maximum line width; use `None`, 0, or a negative number\n to completely disable line wrapping\n \"\"\"\n\n if width and width > 0:\n prefixlen = _zh_len(prefix)\n lines = []\n for line in string.splitlines(True):\n if _zh_len(escape(line)) + prefixlen > width:\n chunks = _zh_split(line)\n chunks.reverse()\n while chunks:\n buf = []\n size = 2\n while chunks:\n l = _zh_len(escape(chunks[-1])) - 2 + prefixlen # noqa: E741\n if size + l < width:\n buf.append(chunks.pop())\n size += l\n else:\n if not buf:\n # handle long chunks by putting them on a\n # separate line\n buf.append(chunks.pop())\n break\n lines.append(u''.join(buf))\n else:\n lines.append(line)\n else:\n lines = string.splitlines(True)\n\n if len(lines) <= 1:\n return escape(string)\n\n # Remove empty trailing line\n if lines and not lines[-1]:\n del lines[-1]\n lines[-1] += '\\n'\n return u'\"\"\\n' + u'\\n'.join([(prefix + escape(line)) for line in lines])\n\n\ndef main():\n try:\n import jieba # noqa: F401\n except ImportError:\n return\n\n pofile.normalize = _normalize\n for root, dirs, files in os.walk('.'):\n if 'zh' not in root:\n continue\n for f in files:\n if not f.endswith('.po'):\n continue\n path = os.path.join(root, f)\n\n # only modify recent-changed files\n modify_time = datetime.datetime.fromtimestamp(os.path.getmtime(path))\n if (datetime.datetime.now() - modify_time).total_seconds() > 1800:\n continue\n\n with open(path, 'rb') as inpf:\n catalog = pofile.read_po(inpf)\n with open(path, 'wb') as outf:\n pofile.write_po(outf, catalog)\n\n\nif __name__ == '__main__':\n main()\n",
"path": "docs/source/norm_zh.py"
}
] | [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis file folds Chinese po files by hacking babel.messages.pofile.normalize\nusing jieba text segment library instead of regex\n\"\"\"\n\nimport datetime\nimport os\n\nfrom babel.messages import pofile\nfrom babel.messages.pofile import escape\n\n\ndef _zh_len(s):\n \"\"\"\n Calculate text length in Chinese\n \"\"\"\n try:\n return len(s.encode('gb2312'))\n except ValueError:\n return len(s)\n\n\ndef _zh_split(s):\n \"\"\"\n Split text length in Chinese\n \"\"\"\n import jieba\n try:\n s.encode('ascii')\n has_zh = False\n except ValueError:\n has_zh = True\n\n if has_zh:\n return list(jieba.cut(s))\n else:\n return pofile.WORD_SEP.split(s)\n\n\n# code modified from babel.messages.pofile (hash 359ecffca479dfe032d0f7210d5cd8160599c816)\ndef _normalize(string, prefix='', width=76):\n r\"\"\"Convert a string into a format that is appropriate for .po files.\n >>> print(normalize('''Say:\n ... \"hello, world!\"\n ... ''', width=None))\n \"\"\n \"Say:\\n\"\n \" \\\"hello, world!\\\"\\n\"\n >>> print(normalize('''Say:\n ... \"Lorem ipsum dolor sit amet, consectetur adipisicing elit, \"\n ... 
''', width=32))\n \"\"\n \"Say:\\n\"\n \" \\\"Lorem ipsum dolor sit \"\n \"amet, consectetur adipisicing\"\n \" elit, \\\"\\n\"\n :param string: the string to normalize\n :param prefix: a string that should be prepended to every line\n :param width: the maximum line width; use `None`, 0, or a negative number\n to completely disable line wrapping\n \"\"\"\n\n if width and width > 0:\n prefixlen = _zh_len(prefix)\n lines = []\n for line in string.splitlines(True):\n if _zh_len(escape(line)) + prefixlen > width:\n chunks = _zh_split(line)\n chunks.reverse()\n while chunks:\n buf = []\n size = 2\n while chunks:\n l = _zh_len(escape(chunks[-1])) - 2 + prefixlen # noqa: E741\n if size + l < width:\n buf.append(chunks.pop())\n size += l\n else:\n if not buf:\n # handle long chunks by putting them on a\n # separate line\n buf.append(chunks.pop())\n break\n lines.append(u''.join(buf))\n else:\n lines.append(line)\n else:\n lines = string.splitlines(True)\n\n if len(lines) <= 1:\n return escape(string)\n\n # Remove empty trailing line\n if lines and not lines[-1]:\n del lines[-1]\n lines[-1] += '\\n'\n return u'\"\"\\n' + u'\\n'.join([(prefix + escape(line)) for line in lines])\n\n\ndef main():\n try:\n import jieba # noqa: F401\n except ImportError:\n return\n\n pofile.normalize = _normalize\n for root, dirs, files in os.walk('.'):\n if 'zh' not in root:\n continue\n for f in files:\n if not f.endswith('.po'):\n continue\n path = os.path.join(root, f)\n\n # only modify recent-changed files\n modify_time = datetime.datetime.fromtimestamp(os.path.getmtime(path))\n if (datetime.datetime.now() - modify_time).total_seconds() > 120:\n continue\n\n with open(path, 'rb') as inpf:\n catalog = pofile.read_po(inpf)\n with open(path, 'wb') as outf:\n pofile.write_po(outf, catalog)\n\n\nif __name__ == '__main__':\n main()\n",
"path": "docs/source/norm_zh.py"
}
] | diff --git a/docs/Makefile b/docs/Makefile
index e6c03e83a8..b684193665 100644
--- a/docs/Makefile
+++ b/docs/Makefile
@@ -13,6 +13,9 @@ BUILDDIR = build
I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(SOURCEDIR)
I18NSPHINXLANGS = -l zh_CN
+# make mars code available for sphinx builder
+export PYTHONPATH := ../
+
# Put it first so that "make" without argument is like "make help".
help:
@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/docs/source/contributing.rst b/docs/source/contributing.rst
index 29b37c296b..e6c7ff8e5b 100644
--- a/docs/source/contributing.rst
+++ b/docs/source/contributing.rst
@@ -128,3 +128,13 @@ After that you can translate Mars documents into your language. Note that when
you run ``make gettext`` again, translations will be broken into a fixed-width
text. For Chinese translators, you need to install ``jieba`` to get this
effect.
+
+When you finish translation, you can run
+
+.. code-block:: bash
+
+ cd docs
+ # change LANG into the language you want to build
+ make -e SPHINXOPTS="-D language='LANG'" html
+
+to build the document in the language you just translated into.
diff --git a/docs/source/locale/zh_CN/LC_MESSAGES/contributing.po b/docs/source/locale/zh_CN/LC_MESSAGES/contributing.po
index 3748c41bf9..dfdc2de1cb 100644
--- a/docs/source/locale/zh_CN/LC_MESSAGES/contributing.po
+++ b/docs/source/locale/zh_CN/LC_MESSAGES/contributing.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: mars \n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2019-02-15 19:14+0800\n"
+"POT-Creation-Date: 2019-03-13 13:14+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <[email protected]>\n"
@@ -209,3 +209,11 @@ msgstr ""
"make gettext``,翻译后的文字将被转为固定宽度文字。对于中文译者,为达到该"
"效果,需要安装 ``jieba`` 分词包。"
+#: ../../source/contributing.rst:132
+msgid "When you finish translation, you can run"
+msgstr "在完成翻译后,你可以执行"
+
+#: ../../source/contributing.rst:140
+msgid "to build the document in the language you just translated into."
+msgstr "以使用你刚才翻译的语言编译文档。"
+
diff --git a/docs/source/locale/zh_CN/LC_MESSAGES/distributed/fault-tolerance.po b/docs/source/locale/zh_CN/LC_MESSAGES/distributed/fault-tolerance.po
index a93248cb9e..12c20a2c3f 100644
--- a/docs/source/locale/zh_CN/LC_MESSAGES/distributed/fault-tolerance.po
+++ b/docs/source/locale/zh_CN/LC_MESSAGES/distributed/fault-tolerance.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: mars \n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2019-02-19 13:49+0800\n"
+"POT-Creation-Date: 2019-03-13 11:41+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <[email protected]>\n"
@@ -27,7 +27,7 @@ msgid ""
"worker-level. Scheduler-level support is not implemented now."
msgstr ""
"当下,Mars 在两个层面上有容错支持:进程级别容错和 Worker 级别容错,尚未"
-"实现 Scheduler 级别容错"
+"实现 Scheduler 级别容错。"
#: ../../source/distributed/fault-tolerance.rst:8
msgid "Process-level Fault Tolerance"
@@ -51,8 +51,8 @@ msgid "Worker-level Fault Tolerance"
msgstr "Worker 容错"
#: ../../source/distributed/fault-tolerance.rst:20
-msgid "New in version 0.2.0a3"
-msgstr ""
+msgid "New in version 0.2.0a2"
+msgstr "自 0.2.0a2 起支持"
#: ../../source/distributed/fault-tolerance.rst:22
msgid ""
diff --git a/docs/source/locale/zh_CN/LC_MESSAGES/index.po b/docs/source/locale/zh_CN/LC_MESSAGES/index.po
index 5dc6ea6629..1edd211345 100644
--- a/docs/source/locale/zh_CN/LC_MESSAGES/index.po
+++ b/docs/source/locale/zh_CN/LC_MESSAGES/index.po
@@ -23,7 +23,7 @@ msgstr "Mars"
#: ../../source/index.rst:9
msgid "Mars is a tensor-based unified framework for large-scale data computation."
-msgstr "Mars 是基于张量的,用来进行大规模数据计算的统一计算框架"
+msgstr "Mars 是基于张量的,用于进行大规模数据计算的统一计算框架"
#: ../../source/index.rst:12
msgid "Mars tensor"
diff --git a/docs/source/locale/zh_CN/LC_MESSAGES/install.po b/docs/source/locale/zh_CN/LC_MESSAGES/install.po
index 5f7dbf4d46..8be388ecda 100644
--- a/docs/source/locale/zh_CN/LC_MESSAGES/install.po
+++ b/docs/source/locale/zh_CN/LC_MESSAGES/install.po
@@ -120,12 +120,13 @@ msgid ""
"computed."
msgstr ""
"Mars Worker 管理两个不同部分的内存,第一部分为每个进程的私有内存,第二"
-"部分是使用 Apache Arrow <https://arrow.apache.org/docs/python/plasma.html"
-">`_ 管理的共享内存。当 Mars Worker 启动,它将默认使用当前可用内存的 50% "
-"作为共享内存,将剩余部分作为各进程私有内存使用。与此同时,Mars 为内存分配"
-"提供了软限制和硬限制,分别默认为 75% 和 90%。如果这些选项不满足你的需要,"
-"你可以使用 ``--cache-mem`` 参数配置共享内存的大小,使用 ``--phy-mem`` "
-"参数配置总内存大小,软限制和硬限制将从这些数值计算。"
+"部分是使用 `Apache Arrow 中的 plasma_store <https://arrow.apache.org/docs"
+"/python/plasma.html>`_ 管理的共享内存。当 Mars Worker 启动,它将默认使用"
+"当前可用内存的 50% 作为共享内存,将剩余部分作为各进程私有内存使用。"
+"与此同时,Mars 为内存分配提供了软限制和硬限制,分别默认为 75% 和 90%。"
+"如果这些选项不满足你的需要,你可以使用 ``--cache-mem`` 参数配置共享内存的"
+"大小,使用 ``--phy-mem`` 参数配置总内存大小,软限制和硬限制将从这些数值"
+"计算。"
#: ../../source/install.rst:129
msgid "For instance, by using"
diff --git a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/binary.po b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/binary.po
index 4d2a0d96d0..6ae92526d6 100644
--- a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/binary.po
+++ b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/binary.po
@@ -19,7 +19,7 @@ msgstr ""
#: ../../source/tensor/binary.rst:2
msgid "Binary Operations"
-msgstr "二元操作"
+msgstr "二元运算"
#: ../../source/tensor/binary.rst:5
msgid "Elementwise bit operations"
@@ -31,7 +31,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Compute the bit-wise AND of two tensors element-wise."
-msgstr ""
+msgstr "按元素对两个张量按位求与"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.bitwise_or <mars.tensor.bitwise_or>`"
@@ -39,7 +39,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Compute the bit-wise OR of two tensors element-wise."
-msgstr ""
+msgstr "按元素对两个张量按位求或"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.bitwise_xor <mars.tensor.bitwise_xor>`"
@@ -47,7 +47,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Compute the bit-wise XOR of two arrays element-wise."
-msgstr ""
+msgstr "按元素对两个张量按位求异或"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.invert <mars.tensor.invert>`"
@@ -55,7 +55,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Compute bit-wise inversion, or bit-wise NOT, element-wise."
-msgstr ""
+msgstr "按元素对张量按位求反"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.left_shift <mars.tensor.left_shift>`"
@@ -63,7 +63,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Shift the bits of an integer to the left."
-msgstr ""
+msgstr "按元素对张量按位左移"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.right_shift <mars.tensor.right_shift>`"
@@ -71,5 +71,5 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Shift the bits of an integer to the right."
-msgstr ""
+msgstr "按元素对张量按位右移"
diff --git a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/creation.po b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/creation.po
index 5ee50e2705..4cee58411c 100644
--- a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/creation.po
+++ b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/creation.po
@@ -10,7 +10,7 @@ msgstr ""
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2018-05-04 11:27+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
-"Last-Translator: Chen Quan <[email protected]>\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <[email protected]>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
@@ -19,11 +19,11 @@ msgstr ""
#: ../../source/tensor/creation.rst:2
msgid "Tensor Creation Routines"
-msgstr "Tensor创建例程"
+msgstr "创建 Tensor"
#: ../../source/tensor/creation.rst:5
msgid "Basic creation routines"
-msgstr "基本的创建例程"
+msgstr "基本的创建函数"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.empty <mars.tensor.empty>`"
@@ -99,7 +99,7 @@ msgstr ""
#: ../../source/tensor/creation.rst:23
msgid "Creation from other data"
-msgstr ""
+msgstr "从其他数据创建"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.array <mars.tensor.array>`"
@@ -119,7 +119,7 @@ msgstr ""
#: ../../source/tensor/creation.rst:34
msgid "Numerical ranges"
-msgstr ""
+msgstr "从数值范围创建"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.arange <mars.tensor.arange>`"
@@ -159,7 +159,7 @@ msgstr ""
#: ../../source/tensor/creation.rst:48
msgid "Building matrices"
-msgstr ""
+msgstr "构建矩阵"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.diag <mars.tensor.diag>`"
diff --git a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/datasource.po b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/datasource.po
index fb1d673562..a9c466c063 100644
--- a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/datasource.po
+++ b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/datasource.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: mars \n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2019-01-11 15:52+0800\n"
+"POT-Creation-Date: 2019-03-13 12:47+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: Chen Quan <[email protected]>\n"
"Language-Team: LANGUAGE <[email protected]>\n"
@@ -19,17 +19,17 @@ msgstr ""
#: ../../source/tensor/datasource.rst:2
msgid "Create Mars tensor"
-msgstr "创建Mars张量"
+msgstr "创建 Mars 张量"
#: ../../source/tensor/datasource.rst:4
msgid ""
"You can create mars tensor from Python array like object just like Numpy,"
" or create from Numpy array directly. More details on :doc:`array "
-"creation routine <routines/creation>` and :doc:`random sampling "
-"<routines/random>`."
+"creation routine <creation>` and :doc:`random sampling <random>`."
msgstr ""
-"您可以像Numpy一样从Python数组对象中创建mars张量,"
-"或者直接从Numpy数组创建。关于 :doc:`数组创建例程<routines/creation>` 和 :doc:`随机抽样<routines/random>` 的更多细节。
+"您可以像 Numpy 一样从 Python 数组创建 Mars 张量,或者直接从 Numpy 数组"
+"创建。关于 :doc:`从数组创建 <creation>` 和 :doc:`随机抽样 <random>` 的更"
+"多细节"
#: ../../source/tensor/datasource.rst:14:<autosummary>:1
msgid ":obj:`mars.tensor.tensor <mars.tensor.tensor>`"
@@ -45,14 +45,15 @@ msgstr "创建一个张量"
#: ../../source/tensor/datasource.rst:16
msgid "Create tensor on GPU"
-msgstr "在GPU上创建张量"
+msgstr "在 GPU 上创建张量"
#: ../../source/tensor/datasource.rst:18
msgid ""
"Mars tensor can run on GPU, for tensor creation, just add a ``gpu`` "
"parameter, and set it to ``True``."
msgstr ""
-"Mars张量可以在GPU上运行,用于张量创建,只需添加一个 ``gpu`` 参数,然后将其设置为 ``True`` 。"
+"Mars张量可以在GPU上运行,用于张量创建,只需添加一个 ``gpu`` 参数,然后将"
+"其设置为 ``True`` 。"
#: ../../source/tensor/datasource.rst:28
msgid "Create sparse tensor"
@@ -63,7 +64,8 @@ msgid ""
"Mars tensor can be sparse, unfortunately, only 2-D sparse tensors are "
"supported for now, multi-dimensional tensor will be supported later soon."
msgstr ""
-"Mars张量可以是稀疏的,不幸的是,现在只支持二维稀疏张量,很快就会支持多维张量。"
+"Mars 张量可以是稀疏的,但目前只支持二维稀疏张量,在不远的将来将会支持多维"
+"张量。"
#: ../../source/tensor/datasource.rst:41
msgid "Chunks"
@@ -78,43 +80,39 @@ msgid ""
"fact is that chunk's size may effect heavily on the performance of "
"execution."
msgstr ""
-"Mars 张量中,我们将张量分块处理。"
-" ``chunk_size`` 不是必须的,对于默认设置,块的字节占用将为128M。"
-"但是,用户可以根据数据规模以更灵活的方式指定每个块的大小。"
-"事实是,块的大小可能会严重影响执行的性能。"
+"Mars 张量中,我们将张量分块处理。 ``chunk_size`` 不是必须的,对于默认设置"
+",块的字节占用将为128M。用户也可以根据数据规模以更灵活的方式指定每个块的"
+"大小。需要注意的是,块的大小可能会严重影响执行的性能。"
#: ../../source/tensor/datasource.rst:48
msgid ""
"The options or arguments which will effect the chunk's size are listed "
"below:"
-msgstr ""
-"下面列出了影响块大小的选项或参数:"
+msgstr "下面列出了影响块大小的选项或参数:"
#: ../../source/tensor/datasource.rst:50
msgid ""
"Change ``options.tensor.chunk_size_limit`` which is 128*1024*1024(128M) "
"by default."
msgstr ""
-" ``options.tensor.chunk_size_limit`` 默认情况下更改为128 * 1024 * 1024(128M)。"
+" ``options.tensor.chunk_size_limit`` 默认情况下更改为128 * 1024 * 1024("
+"128M)。"
#: ../../source/tensor/datasource.rst:51
msgid ""
"Specify ``chunk_size`` as integer, like ``5000``, means chunk's size is "
"5000 at most for all dimensions"
-msgstr ""
-"指定 ``chunk_size`` 为整数, ``5000`` 表示所有维度的块大小最多为5000"
+msgstr "指定 ``chunk_size`` 为整数, ``5000`` 表示所有维度的块大小最多为 5000"
#: ../../source/tensor/datasource.rst:52
msgid "Specify ``chunk_size`` as tuple, like ``(5000, 3000)``"
-msgstr ""
-"指定 ``chunk_size`` 为元组,如 ``(5000, 3000)`` "
+msgstr "指定 ``chunk_size`` 为元组,如 ``(5000, 3000)`` "
#: ../../source/tensor/datasource.rst:53
msgid ""
"Explicitly define sizes of all chunks along all dimensions, like "
"``((5000, 5000, 2000), (2000, 1000))``"
-msgstr ""
-"明确定义所有维度的所有块的大小,例如 ((5000, 5000, 2000), (2000, 1000))"
+msgstr "明确定义所有维度的所有块的大小,例如 ((5000, 5000, 2000), (2000, 1000))"
#: ../../source/tensor/datasource.rst:56
msgid "Chunks Examples"
@@ -122,13 +120,11 @@ msgstr "块的例子"
#: ../../source/tensor/datasource.rst:58
msgid "Assume we have such a tensor with the data shown below."
-msgstr ""
-"假设我们有包含如下数据的张量"
+msgstr "假设我们有包含如下数据的张量"
#: ../../source/tensor/datasource.rst:71
-msgid "We will show how different ``chunk_size=`` arguments will tile the tensor."
-msgstr ""
-"我们将展示不同的 ``chunk_size=`` 如何影响张量的分块。"
+msgid "We will show how different ``chunk_size`` arguments will tile the tensor."
+msgstr "我们将展示不同的 ``chunk_size`` 如何影响张量的分块。"
#: ../../source/tensor/datasource.rst:73
msgid "``chunk_size=3``:"
diff --git a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/eager-mode.po b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/eager-mode.po
index c1f09829b7..abd882c727 100644
--- a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/eager-mode.po
+++ b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/eager-mode.po
@@ -23,35 +23,31 @@ msgstr "Eager 模式"
#: ../../source/tensor/eager-mode.rst:4
msgid "New in version 0.2.0a2"
-msgstr ""
+msgstr "自 0.2.0a2 起支持"
#: ../../source/tensor/eager-mode.rst:6
msgid ""
"Mars supports eager mode which makes it friendly for developing and easy "
"to debug."
-msgstr ""
-"Mars支持eager模式,使其易于开发和易于调试。"
+msgstr "Mars 支持 Eager 模式以方便开发和调试。"
#: ../../source/tensor/eager-mode.rst:9
msgid ""
"Users can enable the eager mode by options, set options at the beginning "
"of the program or console session."
-msgstr ""
-"用户可以通过选项启用eager模式,在程序开头或控制台会话中设置选项。"
+msgstr "用户可以通过选项启用 Eager 模式,在程序开头或控制台会话中设置选项。"
#: ../../source/tensor/eager-mode.rst:18
msgid "Or use a context."
-msgstr ""
-"或者使用上下文。"
+msgstr "或者使用上下文。"
#: ../../source/tensor/eager-mode.rst:29
msgid ""
"If eager mode is on, tensor will be executed immediately by default "
"session once it is created."
-msgstr ""
-"如果打开了eager模式,则会在创建默认会话后立即执行tensor。"
+msgstr "如果打开了 Eager 模式,则会在创建默认会话后立即执行 tensor。"
#: ../../source/tensor/eager-mode.rst:43
msgid "Use ``fetch`` to obtain numpy value from a tensor:"
-msgstr ""
-"使用 ``fetch`` 从张量中获取numpy值:"
\ No newline at end of file
+msgstr "使用 ``fetch`` 从张量中获取 Numpy 值:"
+
diff --git a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/execution.po b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/execution.po
index 6f2e8b8a2e..695a2623a8 100644
--- a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/execution.po
+++ b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/execution.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: mars \n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2018-11-28 19:54+0800\n"
+"POT-Creation-Date: 2019-03-13 13:07+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: Chen Quan <[email protected]>\n"
"Language-Team: LANGUAGE <[email protected]>\n"
@@ -23,53 +23,55 @@ msgstr "本地执行"
#: ../../source/tensor/execution.rst:4
msgid ""
-"Mars tensor will not be executed unless users call ``execute`` or "
-"``session.run`` methods."
+"When :doc:`eager mode <eager-mode>` is not enabled, which is the default "
+"behavior, Mars tensor will not be executed unless users call ``execute`` "
+"or ``session.run`` methods."
msgstr ""
-"除非用户调用 ``execute`` 或使用 ``session.run`` 方法,否则不会执行Mars张量。"
+"默认情况下,Mars 的 :doc:`Eager 模式 <eager-mode>` 处于关闭状态。此时,"
+"除非用户显示调用 ``execute`` 或使用 ``session.run`` 方法,Mars 张量不会"
+"执行。"
-#: ../../source/tensor/execution.rst:6
+#: ../../source/tensor/execution.rst:8
msgid ""
"If no session is created explicitly, the ``execute`` will create a local "
"session, and mark it as a default session."
msgstr ""
-"如果没有显式创建会话, ``execute`` 方法会创建一个本地会话,并将其标记为默认会话。"
+"如果没有显式创建会话, ``execute`` 方法会创建一个本地会话,并将其标记为"
+"默认会话。"
-#: ../../source/tensor/execution.rst:9
+#: ../../source/tensor/execution.rst:12
msgid "Session"
msgstr "会话"
-#: ../../source/tensor/execution.rst:11
+#: ../../source/tensor/execution.rst:14
msgid ""
"Users can create a new session by ``new_session`` method, if no argument "
"is provided, a local session will be generated."
msgstr ""
-"用户可以通过 ``new_session`` 方法创建新会话,如果未提供参数,则将生成本地会话。"
+"用户可以通过 ``new_session`` 方法创建新会话,如果未提供参数,则将生成本地"
+"会话。"
-#: ../../source/tensor/execution.rst:21
+#: ../../source/tensor/execution.rst:24
msgid ""
"By calling ``as_default`` of a session, the session will be marked as the"
" default session."
-msgstr ""
-"通过调用会话的 ``as_default`` 方法,该会话将被标记为默认会话。"
+msgstr "通过调用会话的 ``as_default`` 方法,该会话将被标记为默认会话。"
-#: ../../source/tensor/execution.rst:29
+#: ../../source/tensor/execution.rst:33
msgid ""
"More than one mars tensors can be passed to ``session.run``, and "
"calculate the results for each tensor."
-msgstr ""
-"可以传递多个mars张量给 ``session.run`` ,并计算每个张量的结果。"
+msgstr "可以传递多个mars张量给 ``session.run`` ,并计算每个张量的结果。"
-#: ../../source/tensor/execution.rst:49
+#: ../../source/tensor/execution.rst:54
msgid "Execute a tensor"
msgstr "执行张量"
-#: ../../source/tensor/execution.rst:51
+#: ../../source/tensor/execution.rst:56
msgid "For a single tensor, ``execute`` can be called."
-msgstr ""
-"可以调用张量的 ``execute`` 方法来执行单个张量。"
+msgstr "可以调用张量的 ``execute`` 方法来执行单个张量。"
+
+#: ../../source/tensor/execution.rst:64
+msgid "Session can be specified by the argument ``session``."
+msgstr "会话可以由参数 ``session`` 指定。"
-#: ../../source/tensor/execution.rst:59
-msgid "Session can be specified by the argument ``session=``."
-msgstr ""
-"会话可以由参数 ``session=`` 指定。"
diff --git a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/fft.po b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/fft.po
index 11e45b830b..9cd4d8aff1 100644
--- a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/fft.po
+++ b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/fft.po
@@ -19,13 +19,11 @@ msgstr ""
#: ../../source/tensor/fft.rst:4
msgid "Discrete Fourier Transform"
-msgstr ""
-"离散傅立叶变换"
+msgstr "离散傅立叶变换"
#: ../../source/tensor/fft.rst:8
msgid "Standard FFTs"
-msgstr ""
-"标准离散傅立叶变换"
+msgstr "标准快速傅立叶变换"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.fft.fft <mars.tensor.fft.fft>`"
@@ -33,7 +31,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Compute the one-dimensional discrete Fourier Transform."
-msgstr ""
+msgstr "计算一维离散傅立叶变换"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.fft.ifft <mars.tensor.fft.ifft>`"
@@ -41,7 +39,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Compute the one-dimensional inverse discrete Fourier Transform."
-msgstr ""
+msgstr "计算一维离散傅立叶逆变换"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.fft.fft2 <mars.tensor.fft.fft2>`"
@@ -49,7 +47,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Compute the 2-dimensional discrete Fourier Transform"
-msgstr ""
+msgstr "计算二维离散傅立叶变换"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.fft.ifft2 <mars.tensor.fft.ifft2>`"
@@ -57,7 +55,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Compute the 2-dimensional inverse discrete Fourier Transform."
-msgstr ""
+msgstr "计算二维离散傅立叶逆变换"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.fft.fftn <mars.tensor.fft.fftn>`"
@@ -65,7 +63,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Compute the N-dimensional discrete Fourier Transform."
-msgstr ""
+msgstr "计算N维离散傅立叶变换"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.fft.ifftn <mars.tensor.fft.ifftn>`"
@@ -73,11 +71,11 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Compute the N-dimensional inverse discrete Fourier Transform."
-msgstr ""
+msgstr "计算N维离散傅立叶逆变换"
#: ../../source/tensor/fft.rst:23
msgid "Real FFTs"
-msgstr ""
+msgstr "实数快速傅立叶变换"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.fft.rfft <mars.tensor.fft.rfft>`"
@@ -85,7 +83,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Compute the one-dimensional discrete Fourier Transform for real input."
-msgstr ""
+msgstr "对实数输入计算一维离散傅立叶变换"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.fft.irfft <mars.tensor.fft.irfft>`"
@@ -93,7 +91,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Compute the inverse of the n-point DFT for real input."
-msgstr ""
+msgstr "对实数输入的N点一维傅立叶变换结果计算逆变换"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.fft.rfft2 <mars.tensor.fft.rfft2>`"
@@ -101,7 +99,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Compute the 2-dimensional FFT of a real tensor."
-msgstr ""
+msgstr "对实数输入计算二维离散傅立叶变换"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.fft.irfft2 <mars.tensor.fft.irfft2>`"
@@ -109,7 +107,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Compute the 2-dimensional inverse FFT of a real array."
-msgstr ""
+msgstr "对实数输入计算二维离散傅立叶逆变换"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.fft.rfftn <mars.tensor.fft.rfftn>`"
@@ -117,7 +115,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Compute the N-dimensional discrete Fourier Transform for real input."
-msgstr ""
+msgstr "对实数输入计算N维离散傅立叶变换"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.fft.irfftn <mars.tensor.fft.irfftn>`"
@@ -125,11 +123,11 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Compute the inverse of the N-dimensional FFT of real input."
-msgstr ""
+msgstr "对实数输入计算N维离散傅立叶逆变换"
#: ../../source/tensor/fft.rst:38
msgid "Hermitian FFTs"
-msgstr ""
+msgstr "Hermitian 快速傅立叶变换"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.fft.hfft <mars.tensor.fft.hfft>`"
@@ -139,7 +137,7 @@ msgstr ""
msgid ""
"Compute the FFT of a signal that has Hermitian symmetry, i.e., a real "
"spectrum."
-msgstr ""
+msgstr "对具备 Hermitian 对称性的信号,例如实频谱,计算傅立叶变换"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.fft.ihfft <mars.tensor.fft.ihfft>`"
@@ -147,11 +145,11 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Compute the inverse FFT of a signal that has Hermitian symmetry."
-msgstr ""
+msgstr "对具备 Hermitian 对称信号 的傅立叶变换结果计算逆变换"
#: ../../source/tensor/fft.rst:49
msgid "Helper routines"
-msgstr ""
+msgstr "辅助函数"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.fft.fftfreq <mars.tensor.fft.fftfreq>`"
@@ -159,7 +157,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Return the Discrete Fourier Transform sample frequencies."
-msgstr ""
+msgstr "返回离散傅立叶变换的采样频率"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.fft.rfftfreq <mars.tensor.fft.rfftfreq>`"
@@ -169,7 +167,7 @@ msgstr ""
msgid ""
"Return the Discrete Fourier Transform sample frequencies (for usage with "
"rfft, irfft)."
-msgstr ""
+msgstr "返回离散傅立叶变换的采样频率(适用于实数变换和逆变换)"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.fft.fftshift <mars.tensor.fft.fftshift>`"
@@ -177,7 +175,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Shift the zero-frequency component to the center of the spectrum."
-msgstr ""
+msgstr "将0频率成分移动到频谱中心"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.fft.ifftshift <mars.tensor.fft.ifftshift>`"
@@ -185,5 +183,5 @@ msgstr ""
#: ../../<autosummary>:1
msgid "The inverse of `fftshift`."
-msgstr ""
+msgstr "`fftshift` 的逆操作"
diff --git a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/generated/mars.tensor.core.Tensor.po b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/generated/mars.tensor.core.Tensor.po
index 1304a7e76a..53887f9298 100644
--- a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/generated/mars.tensor.core.Tensor.po
+++ b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/generated/mars.tensor.core.Tensor.po
@@ -8,14 +8,14 @@ msgid ""
msgstr ""
"Project-Id-Version: mars \n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2019-02-21 15:10+0800\n"
+"POT-Creation-Date: 2019-03-13 11:41+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <[email protected]>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
"Content-Transfer-Encoding: 8bit\n"
-"Generated-By: Babel 2.5.3\n"
+"Generated-By: Babel 2.6.0\n"
#: ../../source/tensor/generated/mars.tensor.core.Tensor.rst:2
msgid "mars.tensor.core.Tensor"
@@ -176,7 +176,7 @@ msgid "Dot product of two arrays."
msgstr ""
#: ../../source/tensor/generated/mars.tensor.core.Tensor.rst:77:<autosummary>:1
-msgid ":obj:`Tensor.execute <Tensor.execute>`\\"
+msgid ":obj:`execute <mars.tensor.core.Tensor.execute>`\\ \\(\\[session\\]\\)"
msgstr ""
#: ../../source/tensor/generated/mars.tensor.core.Tensor.rst:77:<autosummary>:1
@@ -264,7 +264,11 @@ msgid "Return the product of tensor elements over a given axis."
msgstr ""
#: ../../source/tensor/generated/mars.tensor.core.Tensor.rst:77:<autosummary>:1
-msgid ":obj:`Tensor.ravel <Tensor.ravel>`\\"
+msgid ":obj:`ravel <mars.tensor.core.Tensor.ravel>`\\ \\(\\)"
+msgstr ""
+
+#: ../../source/tensor/generated/mars.tensor.core.Tensor.rst:77:<autosummary>:1
+msgid "Return a flattened tensor."
msgstr ""
#: ../../source/tensor/generated/mars.tensor.core.Tensor.rst:77:<autosummary>:1
@@ -284,7 +288,13 @@ msgid "Repeat elements of a tensor."
msgstr ""
#: ../../source/tensor/generated/mars.tensor.core.Tensor.rst:77:<autosummary>:1
-msgid ":obj:`Tensor.reshape <Tensor.reshape>`\\"
+msgid ""
+":obj:`reshape <mars.tensor.core.Tensor.reshape>`\\ \\(shape\\, "
+"\\*shapes\\)"
+msgstr ""
+
+#: ../../source/tensor/generated/mars.tensor.core.Tensor.rst:77:<autosummary>:1
+msgid "Returns a tensor containing the same data with a new shape."
msgstr ""
#: ../../source/tensor/generated/mars.tensor.core.Tensor.rst:77:<autosummary>:1
@@ -372,7 +382,11 @@ msgid ":obj:`Tensor.tosparse <Tensor.tosparse>`\\"
msgstr ""
#: ../../source/tensor/generated/mars.tensor.core.Tensor.rst:77:<autosummary>:1
-msgid ":obj:`Tensor.transpose <Tensor.transpose>`\\"
+msgid ":obj:`transpose <mars.tensor.core.Tensor.transpose>`\\ \\(\\*axes\\)"
+msgstr ""
+
+#: ../../source/tensor/generated/mars.tensor.core.Tensor.rst:77:<autosummary>:1
+msgid "Returns a view of the tensor with axes transposed."
msgstr ""
#: ../../source/tensor/generated/mars.tensor.core.Tensor.rst:77:<autosummary>:1
diff --git a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/generated/mars.tensor.inner.po b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/generated/mars.tensor.inner.po
new file mode 100644
index 0000000000..f1ce987f54
--- /dev/null
+++ b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/generated/mars.tensor.inner.po
@@ -0,0 +1,33 @@
+# SOME DESCRIPTIVE TITLE.
+# Copyright (C) 2014-2018, The Alibaba Group Holding Ltd.
+# This file is distributed under the same license as the mars package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, 2019.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: mars \n"
+"Report-Msgid-Bugs-To: \n"
+"POT-Creation-Date: 2019-03-13 14:24+0800\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
+"Language-Team: LANGUAGE <[email protected]>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=utf-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+"Generated-By: Babel 2.6.0\n"
+
+#: ../../source/tensor/generated/mars.tensor.inner.rst:2
+msgid "mars.tensor.inner"
+msgstr ""
+
+#: mars.tensor.inner:1 of
+msgid "Returns the inner product of a and b for arrays of floating point types."
+msgstr ""
+
+#: mars.tensor.inner:3 of
+msgid ""
+"Like the generic NumPy equivalent the product sum is over the last "
+"dimension of a and b. The first argument is not conjugated."
+msgstr ""
+
diff --git a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/index.po b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/index.po
index 7f6167a0d0..f458febb43 100644
--- a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/index.po
+++ b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/index.po
@@ -19,5 +19,5 @@ msgstr ""
#: ../../source/tensor/index.rst:2
msgid "Mars tensor - distributed tensor with NumPy-like API"
-msgstr ""
-"Mars tensor - 提供类似 Numpy API 的分布式张量库"
+msgstr "Mars tensor - 提供类似 Numpy API 的分布式张量库"
+
diff --git a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/indexing.po b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/indexing.po
index 6550f51f04..44e7f9cd47 100644
--- a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/indexing.po
+++ b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/indexing.po
@@ -19,12 +19,11 @@ msgstr ""
#: ../../source/tensor/indexing.rst:2
msgid "Indexing Routines"
-msgstr "索引例程"
+msgstr "索引"
#: ../../source/tensor/indexing.rst:5
msgid "Generating index arrays"
-msgstr ""
-"生成索引数组"
+msgstr "生成索引数组"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.nonzero <mars.tensor.nonzero>`"
@@ -70,7 +69,7 @@ msgstr ""
#: ../../source/tensor/indexing.rst:19
msgid "Indexing-like opeartions"
-msgstr ""
+msgstr "类似索引的操作"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.take <mars.tensor.take>`"
diff --git a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/linalg.po b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/linalg.po
index 9f1e7f6b33..773eb45ae0 100644
--- a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/linalg.po
+++ b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/linalg.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: mars \n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2018-05-07 17:08+0800\n"
+"POT-Creation-Date: 2019-03-13 14:24+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: Chen Quan <[email protected]>\n"
"Language-Team: LANGUAGE <[email protected]>\n"
@@ -18,83 +18,90 @@ msgstr ""
"Generated-By: Babel 2.6.0\n"
#: ../../source/tensor/linalg.rst:2
-msgid "Linear Algebra (``mars.tensor.linalg``)"
-msgstr ""
-"线性代数 (``mars.tensor.linalg``)"
+msgid "Linear Algebra"
+msgstr "线性代数"
#: ../../source/tensor/linalg.rst:5
msgid "Matrix and vector products"
-msgstr ""
+msgstr "矩阵和向量乘法"
-#: ../../<autosummary>:1
+#: ../../source/tensor/linalg.rst:16:<autosummary>:1
msgid ":obj:`mars.tensor.dot <mars.tensor.dot>`"
msgstr ""
-#: ../../<autosummary>:1
+#: ../../source/tensor/linalg.rst:16:<autosummary>:1
msgid "Dot product of two arrays."
-msgstr ""
+msgstr "计算两个数组的点积"
-#: ../../<autosummary>:1
+#: ../../source/tensor/linalg.rst:16:<autosummary>:1
msgid ":obj:`mars.tensor.vdot <mars.tensor.vdot>`"
msgstr ""
-#: ../../<autosummary>:1
+#: ../../source/tensor/linalg.rst:16:<autosummary>:1
msgid "Return the dot product of two vectors."
+msgstr "计算两个向量的点积"
+
+#: ../../source/tensor/linalg.rst:16:<autosummary>:1
+msgid ":obj:`mars.tensor.inner <mars.tensor.inner>`"
msgstr ""
-#: ../../<autosummary>:1
+#: ../../source/tensor/linalg.rst:16:<autosummary>:1
+msgid "Returns the inner product of a and b for arrays of floating point types."
+msgstr "对两个数组计算内积"
+
+#: ../../source/tensor/linalg.rst:16:<autosummary>:1
msgid ":obj:`mars.tensor.matmul <mars.tensor.matmul>`"
msgstr ""
-#: ../../<autosummary>:1
+#: ../../source/tensor/linalg.rst:16:<autosummary>:1
msgid "Matrix product of two tensors."
-msgstr ""
+msgstr "计算两个张量的乘法"
-#: ../../<autosummary>:1
+#: ../../source/tensor/linalg.rst:16:<autosummary>:1
msgid ":obj:`mars.tensor.tensordot <mars.tensor.tensordot>`"
msgstr ""
-#: ../../<autosummary>:1
+#: ../../source/tensor/linalg.rst:16:<autosummary>:1
msgid "Compute tensor dot product along specified axes for tensors >= 1-D."
-msgstr ""
+msgstr "沿指定的坐标计算张量点乘"
-#: ../../source/tensor/linalg.rst:17
+#: ../../source/tensor/linalg.rst:18
msgid "Decompositions"
-msgstr ""
+msgstr "矩阵分解"
-#: ../../<autosummary>:1
+#: ../../source/tensor/linalg.rst:28:<autosummary>:1
msgid ":obj:`mars.tensor.linalg.cholesky <mars.tensor.linalg.cholesky>`"
msgstr ""
-#: ../../<autosummary>:1
+#: ../../source/tensor/linalg.rst:28:<autosummary>:1
msgid "Cholesky decomposition."
-msgstr ""
+msgstr "Cholesky 分解"
-#: ../../<autosummary>:1
+#: ../../source/tensor/linalg.rst:28:<autosummary>:1
msgid ":obj:`mars.tensor.linalg.qr <mars.tensor.linalg.qr>`"
msgstr ""
-#: ../../<autosummary>:1
+#: ../../source/tensor/linalg.rst:28:<autosummary>:1
msgid "Compute the qr factorization of a matrix."
-msgstr ""
+msgstr "计算矩阵的 QR 分解"
-#: ../../<autosummary>:1
+#: ../../source/tensor/linalg.rst:28:<autosummary>:1
msgid ":obj:`mars.tensor.linalg.svd <mars.tensor.linalg.svd>`"
msgstr ""
-#: ../../<autosummary>:1
+#: ../../source/tensor/linalg.rst:28:<autosummary>:1
msgid "Singular Value Decomposition."
-msgstr ""
+msgstr "奇异值分解(SVD)"
-#: ../../source/tensor/linalg.rst:29
+#: ../../source/tensor/linalg.rst:30
msgid "Norms and other numbers"
-msgstr ""
+msgstr "范数和其他数值"
-#: ../../<autosummary>:1
+#: ../../source/tensor/linalg.rst:38:<autosummary>:1
msgid ":obj:`mars.tensor.linalg.norm <mars.tensor.linalg.norm>`"
msgstr ""
-#: ../../<autosummary>:1
+#: ../../source/tensor/linalg.rst:38:<autosummary>:1
msgid "Matrix or vector norm."
-msgstr ""
+msgstr "计算矩阵或向量的范数"
diff --git a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/logic.po b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/logic.po
index b2366e720f..d17e056fcd 100644
--- a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/logic.po
+++ b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/logic.po
@@ -10,7 +10,7 @@ msgstr ""
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2018-05-07 16:23+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
-"Last-Translator: Chen Quan <[email protected]>\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <[email protected]>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
@@ -19,13 +19,11 @@ msgstr ""
#: ../../source/tensor/logic.rst:2
msgid "Logic Functions"
-msgstr ""
-"逻辑函数"
+msgstr "逻辑函数"
#: ../../source/tensor/logic.rst:5
msgid "Truth value testing"
-msgstr ""
-"真值测试"
+msgstr "真值判定"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.all <mars.tensor.all>`"
@@ -45,7 +43,7 @@ msgstr ""
#: ../../source/tensor/logic.rst:16
msgid "Array contents"
-msgstr ""
+msgstr "数组值判定"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.isfinite <mars.tensor.isfinite>`"
@@ -73,7 +71,7 @@ msgstr ""
#: ../../source/tensor/logic.rst:28
msgid "Array type testing"
-msgstr ""
+msgstr "数组类型判定"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.iscomplex <mars.tensor.iscomplex>`"
@@ -93,7 +91,7 @@ msgstr ""
#: ../../source/tensor/logic.rst:39
msgid "Logic operations"
-msgstr ""
+msgstr "逻辑运算"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.logical_and <mars.tensor.logical_and>`"
@@ -129,7 +127,7 @@ msgstr ""
#: ../../source/tensor/logic.rst:52
msgid "Comparison"
-msgstr ""
+msgstr "比较"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.allclose <mars.tensor.allclose>`"
diff --git a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/manipulation.po b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/manipulation.po
index bfbcceba05..23b16a6e2d 100644
--- a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/manipulation.po
+++ b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/manipulation.po
@@ -19,13 +19,11 @@ msgstr ""
#: ../../source/tensor/manipulation.rst:2
msgid "Tensor Manipulation Routines"
-msgstr ""
-"Tensor操作例程"
+msgstr "Tensor 操作"
#: ../../source/tensor/manipulation.rst:5
msgid "Basic manipulations"
-msgstr ""
-"基本操作"
+msgstr "基本操作"
#: ../../source/tensor/manipulation.rst:13:<autosummary>:1
msgid ":obj:`mars.tensor.copyto <mars.tensor.copyto>`"
@@ -37,7 +35,7 @@ msgstr ""
#: ../../source/tensor/manipulation.rst:15
msgid "Shape manipulation"
-msgstr ""
+msgstr "形状操作"
#: ../../source/tensor/manipulation.rst:116:<autosummary>:1
#: ../../source/tensor/manipulation.rst:24:<autosummary>:1
@@ -59,7 +57,7 @@ msgstr ""
#: ../../source/tensor/manipulation.rst:26
msgid "Transposition"
-msgstr ""
+msgstr "变换"
#: ../../source/tensor/manipulation.rst:38:<autosummary>:1
msgid ":obj:`mars.tensor.moveaxis <mars.tensor.moveaxis>`"
@@ -103,7 +101,7 @@ msgstr ""
#: ../../source/tensor/manipulation.rst:40
msgid "Edit dimensionalities"
-msgstr ""
+msgstr "修改维度"
#: ../../source/tensor/manipulation.rst:54:<autosummary>:1
msgid ":obj:`mars.tensor.atleast_1d <mars.tensor.atleast_1d>`"
@@ -163,7 +161,7 @@ msgstr ""
#: ../../source/tensor/manipulation.rst:56
msgid "Changing kind of tensor"
-msgstr ""
+msgstr "修改 Tensor 类型"
#: ../../source/tensor/manipulation.rst:64:<autosummary>:1
msgid ":obj:`mars.tensor.asarray <mars.tensor.asarray>`"
@@ -175,7 +173,7 @@ msgstr ""
#: ../../source/tensor/manipulation.rst:66
msgid "Joining tensors"
-msgstr ""
+msgstr "合并 Tensor"
#: ../../source/tensor/manipulation.rst:79:<autosummary>:1
msgid ":obj:`mars.tensor.concatenate <mars.tensor.concatenate>`"
@@ -227,7 +225,7 @@ msgstr ""
#: ../../source/tensor/manipulation.rst:81
msgid "Splitting tensors"
-msgstr ""
+msgstr "拆分 Tensor"
#: ../../source/tensor/manipulation.rst:93:<autosummary>:1
msgid ":obj:`mars.tensor.split <mars.tensor.split>`"
@@ -267,7 +265,7 @@ msgstr ""
#: ../../source/tensor/manipulation.rst:95
msgid "Tiling tensors"
-msgstr ""
+msgstr "对 Tensor 分块"
#: ../../source/tensor/manipulation.rst:104:<autosummary>:1
msgid ":obj:`mars.tensor.tile <mars.tensor.tile>`"
@@ -287,7 +285,7 @@ msgstr ""
#: ../../source/tensor/manipulation.rst:106
msgid "Rearranging elements"
-msgstr ""
+msgstr "元素重排"
#: ../../source/tensor/manipulation.rst:116:<autosummary>:1
msgid ":obj:`mars.tensor.flip <mars.tensor.flip>`"
diff --git a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/math.po b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/math.po
index d2c2a1fcdf..fd78300dcc 100644
--- a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/math.po
+++ b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/math.po
@@ -10,7 +10,7 @@ msgstr ""
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2018-05-07 16:39+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
-"Last-Translator: Chen Quan <[email protected]>\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <[email protected]>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
@@ -19,13 +19,11 @@ msgstr ""
#: ../../source/tensor/math.rst:2
msgid "Mathematical Functions"
-msgstr ""
-"数学函数"
+msgstr "数学函数"
#: ../../source/tensor/math.rst:5
msgid "Trigonometric functions"
-msgstr ""
-"三角函数"
+msgstr "三角函数"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.sin <mars.tensor.sin>`"
@@ -117,7 +115,7 @@ msgstr ""
#: ../../source/tensor/math.rst:26
msgid "Hyperbolic functions"
-msgstr ""
+msgstr "双曲函数"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.sinh <mars.tensor.sinh>`"
@@ -169,7 +167,7 @@ msgstr ""
#: ../../source/tensor/math.rst:41
msgid "Rounding"
-msgstr ""
+msgstr "取整"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.around <mars.tensor.around>`"
@@ -225,7 +223,7 @@ msgstr ""
#: ../../source/tensor/math.rst:57
msgid "Sums, products, differences"
-msgstr ""
+msgstr "求和、求积和差分"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.prod <mars.tensor.prod>`"
@@ -317,7 +315,7 @@ msgstr ""
#: ../../source/tensor/math.rst:76
msgid "Exponential and logarithms"
-msgstr ""
+msgstr "指数和对数"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.exp <mars.tensor.exp>`"
@@ -393,7 +391,7 @@ msgstr ""
#: ../../source/tensor/math.rst:94
msgid "Other special functions"
-msgstr ""
+msgstr "其他特殊函数"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.i0 <mars.tensor.i0>`"
@@ -413,7 +411,7 @@ msgstr ""
#: ../../source/tensor/math.rst:105
msgid "Floating point routines"
-msgstr ""
+msgstr "浮点数操作"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.signbit <mars.tensor.signbit>`"
@@ -465,7 +463,7 @@ msgstr ""
#: ../../source/tensor/math.rst:120
msgid "Arithmetic operations"
-msgstr ""
+msgstr "算数操作"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.add <mars.tensor.add>`"
@@ -585,7 +583,7 @@ msgstr ""
#: ../../source/tensor/math.rst:144
msgid "Handling complex numbers"
-msgstr ""
+msgstr "复数操作"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.angle <mars.tensor.angle>`"
@@ -621,7 +619,7 @@ msgstr ""
#: ../../source/tensor/math.rst:157
msgid "Miscellaneous"
-msgstr ""
+msgstr "其他"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.clip <mars.tensor.clip>`"
diff --git a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/overview.po b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/overview.po
index d083d0dbad..cbee76a7e2 100644
--- a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/overview.po
+++ b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/overview.po
@@ -10,7 +10,7 @@ msgstr ""
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2018-05-07 17:46+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
-"Last-Translator: Chen Quan <[email protected]>\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <[email protected]>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
@@ -19,8 +19,7 @@ msgstr ""
#: ../../source/tensor/overview.rst:2
msgid "Overview"
-msgstr ""
-"概述"
+msgstr "概述"
#: ../../source/tensor/overview.rst:4
msgid ""
@@ -30,27 +29,23 @@ msgid ""
"directed graph. This lets us compute on tensors larger than memory and "
"take advantage of the ability of multi-cores or distributed clusters."
msgstr ""
-"Mars tensor 对应 Numpy ndarray,它实现了Numpy ndarray接口的一个子集。"
-"它将一个大张量切分成很多小块,并用有向图描述块之间的计算。这使我们可以在大于内存的张量上进行计算,"
-"并利用多核或分布式集群的能力。"
+"Mars tensor 对应 Numpy ndarray,实现了 Numpy ndarray 接口的子集。在执行中"
+",Mars tensor 将一个大张量切分成很多小块,并用有向图描述块之间的计算。"
+"这使我们可以在大于内存的张量上进行计算,并利用多核或分布式集群的能力。"
#: ../../source/tensor/overview.rst:9
msgid "The following is a brief overview of supported subset of Numpy interface."
-msgstr ""
-"以下是Numpy接口支持子集的简要概述。"
+msgstr "以下是 Mars tensor 所支持 Numpy 接口子集的概述。"
#: ../../source/tensor/overview.rst:11
msgid ""
"Arithmetic and mathematics: ``+``, ``-``, ``*``, ``/``, ``exp``, ``log``,"
" etc."
-msgstr ""
-"算术和数学:``+``, ``-``, ``*``, ``/``, ``exp``, ``log`` 等"
-
+msgstr "算术和数学函数:``+``、``-``、``*``、``/``、``exp``、``log`` 等"
#: ../../source/tensor/overview.rst:12
msgid "Reduction along axes (``sum``, ``max``, ``argmax``, etc)."
-msgstr ""
-"沿轴线聚合(``sum``, ``max``, ``argmax``, 等)"
+msgstr "沿坐标聚合(``sum``, ``max``, ``argmax``, 等)"
#: ../../source/tensor/overview.rst:13
msgid ""
@@ -60,11 +55,9 @@ msgid ""
"Mars does not only support create array/tensor on GPU, but also support "
"create sparse tensor."
msgstr ""
-"大多数的 ` 数组创建程序 "
-"<https://docs.scipy.org/doc/numpy/reference/routines.array-"
-"creation.html>`_ (``empty``, ``ones_like``, ``diag``, 等)。"
-"更重要的是,Mars不仅支持在GPU上创建数组/张量,还支持创建稀疏张量。"
-
+"大多数的 `数组创建方法 <https://docs.scipy.org/doc/numpy/reference/"
+"routines.array-creation.html>`_ (``empty``、``ones_like``、``diag`` 等)"
+"。更重要的是,Mars 不仅支持在 GPU 上创建数组/张量,还支持创建稀疏张量。"
#: ../../source/tensor/overview.rst:16
msgid ""
@@ -72,9 +65,9 @@ msgid ""
"<https://docs.scipy.org/doc/numpy/reference/routines.array-"
"manipulation.html>`_ (``reshape``, ``rollaxis``, ``concatenate``, etc.)"
msgstr ""
-"大部分的 `数组操作例程 "
-"<https://docs.scipy.org/doc/numpy/reference/routines.array-"
-"manipulation.html>`_ (``reshape``, ``rollaxis``, ``concatenate``, 等)"
+"大部分的 `数组操作例程 <https://docs.scipy.org/doc/numpy/reference/"
+"routines.array-manipulation.html>`_ (``reshape``、``rollaxis``、``"
+"concatenate`` 等)"
#: ../../source/tensor/overview.rst:18
msgid ""
@@ -82,16 +75,14 @@ msgid ""
"<https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html>`_ "
"(indexing by ints, slices, newaxes, and Ellipsis)"
msgstr ""
-"`基本索引 "
-"<https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html>`_ "
-"(通过整数,切片,添加新维度和省略号进行索引)"
+"`基本索引 <https://docs.scipy.org/doc/numpy/reference/arrays.indexing."
+"html>`_ (通过整数、切片、添加新维度和省略号进行索引)"
#: ../../source/tensor/overview.rst:20
msgid ""
"Fancy indexing along single axis with lists or numpy arrays, e.g. x[[1, "
"4, 8], :5]"
-msgstr ""
-"使用列表或numpy数组沿单轴的花式索引,例如x [[1,4,8], :5]"
+msgstr "使用列表或 Numpy 数组沿单个坐标的花式索引,例如 x[[1,4,8], :5]"
#: ../../source/tensor/overview.rst:21
msgid ""
@@ -99,8 +90,8 @@ msgid ""
"<https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_ for "
"elementwise operations."
msgstr ""
-"元素运算的 `通用函数 "
-"<https://docs.scipy.org/doc/numpy/reference/ufuncs.html>`_"
+"元素运算的 `通用函数 <https://docs.scipy.org/doc/numpy/reference/ufuncs."
+"html>`_"
#: ../../source/tensor/overview.rst:23
msgid ""
@@ -109,10 +100,9 @@ msgid ""
"including product (``dot``, ``matmul``, etc.) and decomposition "
"(``cholesky``, ``svd``, etc.)."
msgstr ""
-"`线性代数函数 "
-"<https://docs.scipy.org/doc/numpy/reference/routines.linalg.html>`_, "
-"包括乘积 (``dot``, ``matmul``, 等)和分解"
-"(``cholesky``, ``svd``, 等)。"
+"`线性代数函数 <https://docs.scipy.org/doc/numpy/reference/routines.linalg"
+".html>`_,包括乘积(``dot``、``matmul`` 等)和分解(``cholesky``、``svd``"
+" 等)"
#: ../../source/tensor/overview.rst:26
msgid ""
@@ -121,25 +111,22 @@ msgid ""
"community is sincerely welcomed. The main feature not implemented are "
"listed below:"
msgstr ""
-"然而,Mars还没有实现整个Numpy接口,时间限制和难度是主要原因。"
-"我们真诚地欢迎社区的任何贡献。未实现的主要功能如下:"
+"然而,Mars 尚未实现 Numpy 的所有接口,时间限制和难度是主要原因。我们"
+"真诚地欢迎社区的任何贡献。未实现的主要功能如下:"
#: ../../source/tensor/overview.rst:29
msgid "Tensor with unknown shape does not support all operations."
-msgstr ""
-"形状未知的张量不支持所有操作"
+msgstr "形状未知的张量不支持所有操作"
#: ../../source/tensor/overview.rst:30
msgid "Only small subset of ``np.linalg`` are implemented."
-msgstr ""
-"只实现了一小部分 ``np.linalg``"
+msgstr "``np.linalg`` 只实现了一小部分"
#: ../../source/tensor/overview.rst:31
msgid ""
"Operations like ``sort`` which is hard to execute in parallel are not "
"implemented."
-msgstr ""
-"像 ``sort`` 这些操作很难高效并行"
+msgstr "像 ``sort`` 这样不易实现高效并行的操作尚未实现"
#: ../../source/tensor/overview.rst:32
msgid ""
@@ -147,5 +134,6 @@ msgid ""
"etc, because the iteration or loops over a large tensor is very "
"inefficient."
msgstr ""
-"Mars张量没有实现类似 ``tolist`` 和 ``nditer`` 等接口,"
-"因为迭代或者循环处理巨大的张量非常低效。"
+"Mars 张量没有实现 ``tolist`` 和 ``nditer`` 这样的接口,因为迭代或者循环"
+"处理巨大的张量非常低效"
+
diff --git a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/random.po b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/random.po
index ed9a92e29e..070a90a752 100644
--- a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/random.po
+++ b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/random.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: mars \n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2019-01-11 15:52+0800\n"
+"POT-Creation-Date: 2019-03-13 13:29+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: Chen Quan <[email protected]>\n"
"Language-Team: LANGUAGE <[email protected]>\n"
@@ -18,14 +18,12 @@ msgstr ""
"Generated-By: Babel 2.6.0\n"
#: ../../source/tensor/random.rst:4
-msgid "Random Sampling (``mars.tensor.random``)"
-msgstr ""
-"随机抽样 (``mars.tensor.random``)"
+msgid "Random Sampling"
+msgstr "随机抽样"
#: ../../source/tensor/random.rst:7
msgid "Sample random data"
-msgstr ""
-"随机数据样本"
+msgstr "随机数据样本"
#: ../../source/tensor/random.rst:24:<autosummary>:1
msgid ":obj:`mars.tensor.random.rand <mars.tensor.random.rand>`"
@@ -99,7 +97,7 @@ msgstr ""
#: ../../source/tensor/random.rst:26
msgid "Distributions"
-msgstr ""
+msgstr "随机分布"
#: ../../source/tensor/random.rst:67:<autosummary>:1
msgid ":obj:`mars.tensor.random.beta <mars.tensor.random.beta>`"
@@ -399,7 +397,7 @@ msgstr ""
#: ../../source/tensor/random.rst:69
msgid "Random number generator"
-msgstr ""
+msgstr "生成随机数"
#: ../../source/tensor/random.rst:77:<autosummary>:1
msgid ":obj:`mars.tensor.random.seed <mars.tensor.random.seed>`"
diff --git a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/routines.po b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/routines.po
index 8af78beaa5..75bf6fb6f0 100644
--- a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/routines.po
+++ b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/routines.po
@@ -19,8 +19,7 @@ msgstr ""
#: ../../source/tensor/routines.rst:2
msgid "Routines"
-msgstr ""
-"例程"
+msgstr "功能"
#: ../../source/tensor/routines.rst:4
msgid ""
@@ -28,5 +27,6 @@ msgid ""
"cover a subset of `NumPy routines "
"<https://docs.scipy.org/doc/numpy/reference/routines.html>`_."
msgstr ""
-"以下页面描述了与Numpy兼容的例程。这些函数涵盖了 `NumPy例程的子集 "
-"<https://docs.scipy.org/doc/numpy/reference/routines.html>`_ 。"
+"以下页面描述了与 Numpy 兼容的功能。这些函数涵盖了 `NumPy 功能的子集 <"
+"https://docs.scipy.org/doc/numpy/reference/routines.html>`_ 。"
+
diff --git a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/set.po b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/set.po
index d7fe7d60ce..08d9865656 100644
--- a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/set.po
+++ b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/set.po
@@ -19,13 +19,11 @@ msgstr ""
#: ../../source/tensor/set.rst:2
msgid "Set routines"
-msgstr ""
-"集合例程"
+msgstr "集合函数"
#: ../../source/tensor/set.rst:5
msgid "Boolean operations"
-msgstr ""
-"布尔运算"
+msgstr "布尔运算"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.isin <mars.tensor.isin>`"
diff --git a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/sorting.po b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/sorting.po
index 6596482ddb..a7ab9ec059 100644
--- a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/sorting.po
+++ b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/sorting.po
@@ -19,13 +19,11 @@ msgstr ""
#: ../../source/tensor/sorting.rst:2
msgid "Sorting, Searching, and Counting"
-msgstr ""
-"排序、搜索和计数"
+msgstr "排序、搜索和计数"
#: ../../source/tensor/sorting.rst:5
msgid "Searching"
-msgstr ""
-"搜索"
+msgstr "搜索"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.argmax <mars.tensor.argmax>`"
@@ -105,8 +103,7 @@ msgstr ""
#: ../../source/tensor/sorting.rst:24
msgid "Counting"
-msgstr ""
-"计数"
+msgstr "计数"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.count_nonzero <mars.tensor.count_nonzero>`"
diff --git a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/sparse.po b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/sparse.po
index 3375f31c30..76fa6b06f5 100644
--- a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/sparse.po
+++ b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/sparse.po
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: mars \n"
"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2018-11-28 19:54+0800\n"
+"POT-Creation-Date: 2019-03-13 13:07+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
"Last-Translator: Chen Quan <[email protected]>\n"
"Language-Team: LANGUAGE <[email protected]>\n"
@@ -19,21 +19,18 @@ msgstr ""
#: ../../source/tensor/sparse.rst:2
msgid "Sparse tensor"
-msgstr ""
-"稀疏张量"
+msgstr "稀疏张量"
#: ../../source/tensor/sparse.rst:4
msgid ""
"Mars tensor supports sparse tensor, unfortunately, only 2-D sparse "
-"tensors are available for now. Multi-dimensional sparse tensors will be "
+"tensors are available for now. Multi-dimensional sparse tensors will be "
"supported later."
-msgstr ""
-"Mars支持稀疏张量,但不幸的是,目前只支持二维稀疏张量,以后将支持多维稀疏张量"
+msgstr "Mars 支持稀疏张量,但目前只支持二维稀疏张量,未来将支持多维稀疏张量。"
#: ../../source/tensor/sparse.rst:8
msgid "Functions to create sparse tensor"
-msgstr ""
-"创建稀疏张量的函数"
+msgstr "创建稀疏张量的函数"
#: ../../source/tensor/sparse.rst:18:<autosummary>:1
msgid ":obj:`mars.tensor.tensor <mars.tensor.tensor>`"
diff --git a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/statistics.po b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/statistics.po
index 313ee64ea3..3cf4cf392f 100644
--- a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/statistics.po
+++ b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/statistics.po
@@ -10,7 +10,7 @@ msgstr ""
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2018-05-07 17:34+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
-"Last-Translator: Chen Quan <[email protected]>\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <[email protected]>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
@@ -19,13 +19,11 @@ msgstr ""
#: ../../source/tensor/statistics.rst:2
msgid "Statistics"
-msgstr ""
-"统计"
+msgstr "统计"
#: ../../source/tensor/statistics.rst:5
msgid "Order statistics"
-msgstr ""
-"排序统计"
+msgstr "顺序统计量"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.amin <mars.tensor.amin>`"
@@ -71,7 +69,7 @@ msgstr ""
#: ../../source/tensor/statistics.rst:19
msgid "Average and variances"
-msgstr ""
+msgstr "均值和方差"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.average <mars.tensor.average>`"
@@ -133,7 +131,7 @@ msgstr ""
#: ../../source/tensor/statistics.rst:35
msgid "Correlating"
-msgstr ""
+msgstr "相关性"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.corrcoef <mars.tensor.corrcoef>`"
@@ -153,7 +151,7 @@ msgstr ""
#: ../../source/tensor/statistics.rst:46
msgid "Histograms"
-msgstr ""
+msgstr "直方图"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.digitize <mars.tensor.digitize>`"
diff --git a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/ufunc.po b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/ufunc.po
index e260ed67d1..ad6717db0e 100644
--- a/docs/source/locale/zh_CN/LC_MESSAGES/tensor/ufunc.po
+++ b/docs/source/locale/zh_CN/LC_MESSAGES/tensor/ufunc.po
@@ -10,7 +10,7 @@ msgstr ""
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2018-05-04 10:40+0800\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
-"Last-Translator: Chen Quan <[email protected]>\n"
+"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
"Language-Team: LANGUAGE <[email protected]>\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=utf-8\n"
@@ -19,8 +19,7 @@ msgstr ""
#: ../../source/tensor/ufunc.rst:2
msgid "Universal Functions (ufunc)"
-msgstr ""
-"通用函数(ufunc)"
+msgstr "通用函数(ufunc)"
#: ../../source/tensor/ufunc.rst:4
msgid ""
@@ -28,40 +27,36 @@ msgid ""
" elementwise operations. Mars tensor's ufunc supports following features "
"of Numpy's one:"
msgstr ""
-"Mars tensor提供通用函数(也被称为ufuncs)来支持各种元素操作。"
-"Mars tensor的ufunc支持Numpy的以下功能:"
+"Mars tensor 提供通用函数(也被称为 ufunc)来支持各种元素操作。Mars "
+"tensor 的通用函数支持 Numpy 的以下功能:"
#: ../../source/tensor/ufunc.rst:7
msgid "Broadcasting"
-msgstr ""
-"广播"
+msgstr "广播"
#: ../../source/tensor/ufunc.rst:8
msgid "Output type determination"
-msgstr ""
-"输出类型确定"
+msgstr "确定输出类型"
#: ../../source/tensor/ufunc.rst:9
msgid "Casting rules"
-msgstr ""
-"类型转换规则"
+msgstr "类型转换规则"
#: ../../source/tensor/ufunc.rst:11
msgid ""
"Mars tensor's ufunc currently does not support methods like ``reduce``, "
"``accumulate``, ``reduceat``, ``outer``, and ``at``."
msgstr ""
-"Mars张量ufunc目前不支持包括方法有 ``accumulate``, ``reduceat``, ``outer`` 和 ``at``。"
+"Mars 张量通用函数目前不支持包括方法有 ``accumulate``、``reduceat``、``"
+"outer`` 和 ``at``。"
#: ../../source/tensor/ufunc.rst:15
msgid "Available ufuncs"
-msgstr ""
-"可用的ufuncs"
+msgstr "可用的通用函数"
#: ../../source/tensor/ufunc.rst:18
msgid "Math operations"
-msgstr ""
-"数学运算"
+msgstr "数学运算"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.add <mars.tensor.add>`"
@@ -69,7 +64,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Add arguments element-wise."
-msgstr ""
+msgstr "按元素相加"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.subtract <mars.tensor.subtract>`"
@@ -77,7 +72,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Subtract arguments, element-wise."
-msgstr ""
+msgstr "按元素相减"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.multiply <mars.tensor.multiply>`"
@@ -85,7 +80,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Multiply arguments element-wise."
-msgstr ""
+msgstr "按元素相乘"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.divide <mars.tensor.divide>`"
@@ -93,7 +88,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Divide arguments element-wise."
-msgstr ""
+msgstr "按元素相除"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.logaddexp <mars.tensor.logaddexp>`"
@@ -101,7 +96,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Logarithm of the sum of exponentiations of the inputs."
-msgstr ""
+msgstr "按元素计算以e为底的两个输入指数值之和的对数值"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.logaddexp2 <mars.tensor.logaddexp2>`"
@@ -109,7 +104,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Logarithm of the sum of exponentiations of the inputs in base-2."
-msgstr ""
+msgstr "按元素计算以2为底的两个输入指数值之和的对数值"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.true_divide <mars.tensor.true_divide>`"
@@ -117,7 +112,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Returns a true division of the inputs, element-wise."
-msgstr ""
+msgstr "按元素计算实数除法"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.floor_divide <mars.tensor.floor_divide>`"
@@ -125,7 +120,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Return the largest integer smaller or equal to the division of the inputs."
-msgstr ""
+msgstr "按元素计算除法并向下取整"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.negative <mars.tensor.negative>`"
@@ -133,7 +128,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Numerical negative, element-wise."
-msgstr ""
+msgstr "按元素求相反数"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.power <mars.tensor.power>`"
@@ -141,7 +136,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "First tensor elements raised to powers from second tensor, element-wise."
-msgstr ""
+msgstr "按元素求幂,前一个张量中的数值作为底数,后一个张量的数值作为指数"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.remainder <mars.tensor.remainder>`"
@@ -149,7 +144,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Return element-wise remainder of division."
-msgstr ""
+msgstr "按元素返回整数除法的余数"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.mod <mars.tensor.mod>`"
@@ -161,7 +156,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Return the element-wise remainder of division."
-msgstr ""
+msgstr "按元素返回整数除法的余数"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.absolute <mars.tensor.absolute>`"
@@ -169,7 +164,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Calculate the absolute value element-wise."
-msgstr ""
+msgstr "按元素计算绝对值"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.rint <mars.tensor.rint>`"
@@ -177,7 +172,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Round elements of the tensor to the nearest integer."
-msgstr ""
+msgstr "按元素将张量中的值取整到最接近的整数"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.sign <mars.tensor.sign>`"
@@ -185,7 +180,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Returns an element-wise indication of the sign of a number."
-msgstr ""
+msgstr "按元素取符号"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.exp <mars.tensor.exp>`"
@@ -193,7 +188,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Calculate the exponential of all elements in the input tensor."
-msgstr ""
+msgstr "计算所有元素的指数值"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.exp2 <mars.tensor.exp2>`"
@@ -201,7 +196,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Calculate `2**p` for all `p` in the input tensor."
-msgstr ""
+msgstr "计算所有元素以2为底的指数值"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.log <mars.tensor.log>`"
@@ -209,7 +204,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Natural logarithm, element-wise."
-msgstr ""
+msgstr "按元素求自然对数"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.log2 <mars.tensor.log2>`"
@@ -217,7 +212,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Base-2 logarithm of `x`."
-msgstr ""
+msgstr "计算所有元素以2为底的对数值"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.log10 <mars.tensor.log10>`"
@@ -225,7 +220,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Return the base 10 logarithm of the input tensor, element-wise."
-msgstr ""
+msgstr "计算所有元素以10为底的对数值"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.expm1 <mars.tensor.expm1>`"
@@ -233,7 +228,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Calculate ``exp(x) - 1`` for all elements in the tensor."
-msgstr ""
+msgstr "对张量中的所有元素计算 ``exp(x) - 1``"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.log1p <mars.tensor.log1p>`"
@@ -241,7 +236,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Return the natural logarithm of one plus the input tensor, element-wise."
-msgstr ""
+msgstr "对张量中的所有元素计算 ``log(1 + x)``"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.sqrt <mars.tensor.sqrt>`"
@@ -249,7 +244,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Return the positive square-root of an tensor, element-wise."
-msgstr ""
+msgstr "返回按元素求平方根的结果"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.square <mars.tensor.square>`"
@@ -257,7 +252,7 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Return the element-wise square of the input."
-msgstr ""
+msgstr "返回按元素求平方的结果"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.reciprocal <mars.tensor.reciprocal>`"
@@ -265,11 +260,11 @@ msgstr ""
#: ../../<autosummary>:1
msgid "Return the reciprocal of the argument, element-wise."
-msgstr ""
+msgstr "返回按元素求倒数的结果"
#: ../../source/tensor/ufunc.rst:53
msgid "Trigonometric functions"
-msgstr ""
+msgstr "三角函数和双曲函数"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.sin <mars.tensor.sin>`"
@@ -401,7 +396,7 @@ msgstr ""
#: ../../source/tensor/ufunc.rst:78
msgid "Bit-twiddling functions"
-msgstr ""
+msgstr "位运算函数"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.bitwise_and <mars.tensor.bitwise_and>`"
@@ -453,7 +448,7 @@ msgstr ""
#: ../../source/tensor/ufunc.rst:93
msgid "Comparison functions"
-msgstr ""
+msgstr "比较函数"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.greater <mars.tensor.greater>`"
@@ -569,7 +564,7 @@ msgstr ""
#: ../../source/tensor/ufunc.rst:116
msgid "Floating point values"
-msgstr ""
+msgstr "浮点数取值"
#: ../../<autosummary>:1
msgid ":obj:`mars.tensor.isfinite <mars.tensor.isfinite>`"
diff --git a/docs/source/norm_zh.py b/docs/source/norm_zh.py
index 3e6bf7428f..621efff5e9 100755
--- a/docs/source/norm_zh.py
+++ b/docs/source/norm_zh.py
@@ -130,7 +130,7 @@ def main():
# only modify recent-changed files
modify_time = datetime.datetime.fromtimestamp(os.path.getmtime(path))
- if (datetime.datetime.now() - modify_time).total_seconds() > 1800:
+ if (datetime.datetime.now() - modify_time).total_seconds() > 120:
continue
with open(path, 'rb') as inpf:
diff --git a/docs/source/tensor/datasource.rst b/docs/source/tensor/datasource.rst
index 172a251ac9..5485bd282a 100644
--- a/docs/source/tensor/datasource.rst
+++ b/docs/source/tensor/datasource.rst
@@ -2,7 +2,7 @@ Create Mars tensor
==================
You can create mars tensor from Python array like object just like Numpy, or create from Numpy array directly.
-More details on :doc:`array creation routine <routines/creation>` and :doc:`random sampling <routines/random>`.
+More details on :doc:`array creation routine <creation>` and :doc:`random sampling <random>`.
.. autosummary::
:toctree: generated/
@@ -68,7 +68,7 @@ Assume we have such a tensor with the data shown below.
4 2 4 6 2 0
6 8 2 6 5 4
-We will show how different ``chunk_size=`` arguments will tile the tensor.
+We will show how different ``chunk_size`` arguments will tile the tensor.
``chunk_size=3``:
diff --git a/docs/source/tensor/eager-mode.rst b/docs/source/tensor/eager-mode.rst
index 7405d216ed..50a4a3db52 100644
--- a/docs/source/tensor/eager-mode.rst
+++ b/docs/source/tensor/eager-mode.rst
@@ -12,7 +12,6 @@ program or console session.
.. code-block:: python
>>> from mars.config import options
-
>>> options.eager_mode = True
Or use a context.
diff --git a/docs/source/tensor/execution.rst b/docs/source/tensor/execution.rst
index 1d6be62492..9b63adc605 100644
--- a/docs/source/tensor/execution.rst
+++ b/docs/source/tensor/execution.rst
@@ -1,15 +1,18 @@
Local Execution
===============
-Mars tensor will not be executed unless users call ``execute`` or ``session.run`` methods.
+When :doc:`eager mode <eager-mode>` is not enabled, which is the default
+behavior, Mars tensor will not be executed unless users call ``execute`` or
+``session.run`` methods.
-If no session is created explicitly, the ``execute`` will create a local session, and mark it as a default session.
+If no session is created explicitly, the ``execute`` will create a local
+session, and mark it as a default session.
Session
-------
-Users can create a new session by ``new_session`` method, if no argument is provided,
-a local session will be generated.
+Users can create a new session by ``new_session`` method, if no argument is
+provided, a local session will be generated.
.. code-block:: python
@@ -18,7 +21,8 @@ a local session will be generated.
>>> sess = new_session() # create a session
-By calling ``as_default`` of a session, the session will be marked as the default session.
+By calling ``as_default`` of a session, the session will be marked as the
+default session.
.. code-block:: python
@@ -26,7 +30,8 @@ By calling ``as_default`` of a session, the session will be marked as the defaul
>>> sess.as_default()
-More than one mars tensors can be passed to ``session.run``, and calculate the results for each tensor.
+More than one mars tensors can be passed to ``session.run``, and calculate the
+results for each tensor.
.. code-block:: python
@@ -56,9 +61,9 @@ For a single tensor, ``execute`` can be called.
>>> a.sum().execute()
7.0293719034458455
-Session can be specified by the argument ``session=``.
+Session can be specified by the argument ``session``.
.. code-block:: python
>>> a.sum().execute(session=sess)
- 6.12833989477539
\ No newline at end of file
+ 6.12833989477539
diff --git a/docs/source/tensor/generated/mars.tensor.inner.rst b/docs/source/tensor/generated/mars.tensor.inner.rst
new file mode 100644
index 0000000000..10084f2de0
--- /dev/null
+++ b/docs/source/tensor/generated/mars.tensor.inner.rst
@@ -0,0 +1,6 @@
+mars.tensor.inner
+=================
+
+.. currentmodule:: mars.tensor
+
+.. autofunction:: inner
\ No newline at end of file
diff --git a/docs/source/tensor/linalg.rst b/docs/source/tensor/linalg.rst
index d73a3ed171..6e1ab0e33f 100644
--- a/docs/source/tensor/linalg.rst
+++ b/docs/source/tensor/linalg.rst
@@ -1,5 +1,5 @@
-Linear Algebra (``mars.tensor.linalg``)
-=======================================
+Linear Algebra
+==============
Matrix and vector products
--------------------------
@@ -10,6 +10,7 @@ Matrix and vector products
mars.tensor.dot
mars.tensor.vdot
+ mars.tensor.inner
mars.tensor.matmul
mars.tensor.tensordot
diff --git a/docs/source/tensor/random.rst b/docs/source/tensor/random.rst
index 5ce1555d20..50a4e8c065 100644
--- a/docs/source/tensor/random.rst
+++ b/docs/source/tensor/random.rst
@@ -1,7 +1,7 @@
.. module:: mars.tensor.random
-Random Sampling (``mars.tensor.random``)
-========================================
+Random Sampling
+===============
Sample random data
------------------
diff --git a/docs/source/tensor/sparse.rst b/docs/source/tensor/sparse.rst
index c8a49d2e22..8f80088620 100644
--- a/docs/source/tensor/sparse.rst
+++ b/docs/source/tensor/sparse.rst
@@ -1,8 +1,8 @@
Sparse tensor
=============
-Mars tensor supports sparse tensor, unfortunately, only 2-D sparse tensors are available for now.
-Multi-dimensional sparse tensors will be supported later.
+Mars tensor supports sparse tensor, unfortunately, only 2-D sparse tensors are
+available for now. Multi-dimensional sparse tensors will be supported later.
Functions to create sparse tensor
---------------------------------
| [BUG] Chinese document layout has a link error.
**Describe the bug**
The Chinese documentation layout has a link error.
Doc link: [https://mars-project.readthedocs.io/zh_CN/latest/tensor/overview.html](https://mars-project.readthedocs.io/zh_CN/latest/tensor/overview.html)
|
keras-team__keras-7552 | [
{
"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport numpy as np\n\nimport copy\nimport types as python_types\nimport warnings\n\nfrom .. import backend as K\nfrom .. import activations\nfrom .. import initializers\nfrom .. import regularizers\nfrom .. import constraints\nfrom ..engine import InputSpec\nfrom ..engine import Layer\nfrom ..utils.generic_utils import func_dump\nfrom ..utils.generic_utils import func_load\nfrom ..utils.generic_utils import deserialize_keras_object\nfrom ..utils.generic_utils import has_arg\nfrom ..legacy import interfaces\n\n\nclass Masking(Layer):\n \"\"\"Masks a sequence by using a mask value to skip timesteps.\n\n For each timestep in the input tensor (dimension #1 in the tensor),\n if all values in the input tensor at that timestep\n are equal to `mask_value`, then the timestep will be masked (skipped)\n in all downstream layers (as long as they support masking).\n\n If any downstream layer does not support masking yet receives such\n an input mask, an exception will be raised.\n\n # Example\n\n Consider a Numpy data array `x` of shape `(samples, timesteps, features)`,\n to be fed to a LSTM layer.\n You want to mask timestep #3 and #5 because you lack data for\n these timesteps. You can:\n\n - set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`\n - insert a `Masking` layer with `mask_value=0.` before the LSTM layer:\n\n ```python\n model = Sequential()\n model.add(Masking(mask_value=0., input_shape=(timesteps, features)))\n model.add(LSTM(32))\n ```\n \"\"\"\n\n def __init__(self, mask_value=0., **kwargs):\n super(Masking, self).__init__(**kwargs)\n self.supports_masking = True\n self.mask_value = mask_value\n\n def compute_mask(self, inputs, mask=None):\n return K.any(K.not_equal(inputs, self.mask_value), axis=-1)\n\n def call(self, inputs):\n boolean_mask = K.any(K.not_equal(inputs, self.mask_value),\n axis=-1, keepdims=True)\n return inputs * K.cast(boolean_mask, K.floatx())\n\n def get_config(self):\n config = {'mask_value': self.mask_value}\n base_config = super(Masking, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Dropout(Layer):\n \"\"\"Applies Dropout to the input.\n\n Dropout consists in randomly setting\n a fraction `rate` of input units to 0 at each update during training time,\n which helps prevent overfitting.\n\n # Arguments\n rate: float between 0 and 1. Fraction of the input units to drop.\n noise_shape: 1D integer tensor representing the shape of the\n binary dropout mask that will be multiplied with the input.\n For instance, if your inputs have shape\n `(batch_size, timesteps, features)` and\n you want the dropout mask to be the same for all timesteps,\n you can use `noise_shape=(batch_size, 1, features)`.\n seed: A Python integer to use as random seed.\n\n # References\n - [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf)\n \"\"\"\n @interfaces.legacy_dropout_support\n def __init__(self, rate, noise_shape=None, seed=None, **kwargs):\n super(Dropout, self).__init__(**kwargs)\n self.rate = min(1., max(0., rate))\n self.noise_shape = noise_shape\n self.seed = seed\n self.supports_masking = True\n\n def _get_noise_shape(self, _):\n return self.noise_shape\n\n def call(self, inputs, training=None):\n if 0. 
< self.rate < 1.:\n noise_shape = self._get_noise_shape(inputs)\n\n def dropped_inputs():\n return K.dropout(inputs, self.rate, noise_shape,\n seed=self.seed)\n return K.in_train_phase(dropped_inputs, inputs,\n training=training)\n return inputs\n\n def get_config(self):\n config = {'rate': self.rate}\n base_config = super(Dropout, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass SpatialDropout1D(Dropout):\n \"\"\"Spatial 1D version of Dropout.\n\n This version performs the same function as Dropout, however it drops\n entire 1D feature maps instead of individual elements. If adjacent frames\n within feature maps are strongly correlated (as is normally the case in\n early convolution layers) then regular dropout will not regularize the\n activations and will otherwise just result in an effective learning rate\n decrease. In this case, SpatialDropout1D will help promote independence\n between feature maps and should be used instead.\n\n # Arguments\n rate: float between 0 and 1. Fraction of the input units to drop.\n\n # Input shape\n 3D tensor with shape:\n `(samples, timesteps, channels)`\n\n # Output shape\n Same as input\n\n # References\n - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)\n \"\"\"\n\n @interfaces.legacy_spatialdropout1d_support\n def __init__(self, rate, **kwargs):\n super(SpatialDropout1D, self).__init__(rate, **kwargs)\n self.input_spec = InputSpec(ndim=3)\n\n def _get_noise_shape(self, inputs):\n input_shape = K.shape(inputs)\n noise_shape = (input_shape[0], 1, input_shape[2])\n return noise_shape\n\n\nclass SpatialDropout2D(Dropout):\n \"\"\"Spatial 2D version of Dropout.\n\n This version performs the same function as Dropout, however it drops\n entire 2D feature maps instead of individual elements. If adjacent pixels\n within feature maps are strongly correlated (as is normally the case in\n early convolution layers) then regular dropout will not regularize the\n activations and will otherwise just result in an effective learning rate\n decrease. In this case, SpatialDropout2D will help promote independence\n between feature maps and should be used instead.\n\n # Arguments\n rate: float between 0 and 1. 
Fraction of the input units to drop.\n data_format: 'channels_first' or 'channels_last'.\n In 'channels_first' mode, the channels dimension\n (the depth) is at index 1,\n in 'channels_last' mode is it at index 3.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n # Input shape\n 4D tensor with shape:\n `(samples, channels, rows, cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(samples, rows, cols, channels)` if data_format='channels_last'.\n\n # Output shape\n Same as input\n\n # References\n - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)\n \"\"\"\n\n @interfaces.legacy_spatialdropoutNd_support\n def __init__(self, rate, data_format=None, **kwargs):\n super(SpatialDropout2D, self).__init__(rate, **kwargs)\n if data_format is None:\n data_format = K.image_data_format()\n if data_format not in {'channels_last', 'channels_first'}:\n raise ValueError('`data_format` must be in '\n '{`\"channels_last\"`, `\"channels_first\"`}')\n self.data_format = data_format\n self.input_spec = InputSpec(ndim=4)\n\n def _get_noise_shape(self, inputs):\n input_shape = K.shape(inputs)\n if self.data_format == 'channels_first':\n noise_shape = (input_shape[0], input_shape[1], 1, 1)\n else:\n noise_shape = (input_shape[0], 1, 1, input_shape[3])\n return noise_shape\n\n\nclass SpatialDropout3D(Dropout):\n \"\"\"Spatial 3D version of Dropout.\n\n This version performs the same function as Dropout, however it drops\n entire 3D feature maps instead of individual elements. If adjacent voxels\n within feature maps are strongly correlated (as is normally the case in\n early convolution layers) then regular dropout will not regularize the\n activations and will otherwise just result in an effective learning rate\n decrease. In this case, SpatialDropout3D will help promote independence\n between feature maps and should be used instead.\n\n # Arguments\n rate: float between 0 and 1. 
Fraction of the input units to drop.\n data_format: 'channels_first' or 'channels_last'.\n In 'channels_first' mode, the channels dimension (the depth)\n is at index 1, in 'channels_last' mode is it at index 4.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n # Input shape\n 5D tensor with shape:\n `(samples, channels, dim1, dim2, dim3)` if data_format='channels_first'\n or 5D tensor with shape:\n `(samples, dim1, dim2, dim3, channels)` if data_format='channels_last'.\n\n # Output shape\n Same as input\n\n # References\n - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)\n \"\"\"\n\n @interfaces.legacy_spatialdropoutNd_support\n def __init__(self, rate, data_format=None, **kwargs):\n super(SpatialDropout3D, self).__init__(rate, **kwargs)\n if data_format is None:\n data_format = K.image_data_format()\n if data_format not in {'channels_last', 'channels_first'}:\n raise ValueError('`data_format` must be in '\n '{`\"channels_last\"`, `\"channels_first\"`}')\n self.data_format = data_format\n self.input_spec = InputSpec(ndim=5)\n\n def _get_noise_shape(self, inputs):\n input_shape = K.shape(inputs)\n if self.data_format == 'channels_first':\n noise_shape = (input_shape[0], input_shape[1], 1, 1, 1)\n else:\n noise_shape = (input_shape[0], 1, 1, 1, input_shape[4])\n return noise_shape\n\n\nclass Activation(Layer):\n \"\"\"Applies an activation function to an output.\n\n # Arguments\n activation: name of activation function to use\n (see: [activations](../activations.md)),\n or alternatively, a Theano or TensorFlow operation.\n\n # Input shape\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n # Output shape\n Same shape as input.\n \"\"\"\n\n def __init__(self, activation, **kwargs):\n super(Activation, self).__init__(**kwargs)\n self.supports_masking = True\n self.activation = activations.get(activation)\n\n def call(self, inputs):\n return self.activation(inputs)\n\n def get_config(self):\n config = {'activation': activations.serialize(self.activation)}\n base_config = super(Activation, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Reshape(Layer):\n \"\"\"Reshapes an output to a certain shape.\n\n # Arguments\n target_shape: target shape. 
Tuple of integers.\n Does not include the batch axis.\n\n # Input shape\n Arbitrary, although all dimensions in the input shaped must be fixed.\n Use the keyword argument `input_shape`\n (tuple of integers, does not include the batch axis)\n when using this layer as the first layer in a model.\n\n # Output shape\n `(batch_size,) + target_shape`\n\n # Example\n\n ```python\n # as first layer in a Sequential model\n model = Sequential()\n model.add(Reshape((3, 4), input_shape=(12,)))\n # now: model.output_shape == (None, 3, 4)\n # note: `None` is the batch dimension\n\n # as intermediate layer in a Sequential model\n model.add(Reshape((6, 2)))\n # now: model.output_shape == (None, 6, 2)\n\n # also supports shape inference using `-1` as dimension\n model.add(Reshape((-1, 2, 2)))\n # now: model.output_shape == (None, 3, 2, 2)\n ```\n \"\"\"\n\n def __init__(self, target_shape, **kwargs):\n super(Reshape, self).__init__(**kwargs)\n self.target_shape = tuple(target_shape)\n\n def _fix_unknown_dimension(self, input_shape, output_shape):\n \"\"\"Finds and replaces a missing dimension in an output shape.\n\n This is a near direct port of the internal Numpy function\n `_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c`\n\n # Arguments\n input_shape: original shape of array being reshaped\n output_shape: target shape of the array, with at most\n a single -1 which indicates a dimension that should be\n derived from the input shape.\n\n # Returns\n The new output shape with a `-1` replaced with its computed value.\n\n # Raises\n ValueError: if `input_shape` and `output_shape` do not match.\n \"\"\"\n output_shape = list(output_shape)\n msg = 'total size of new array must be unchanged'\n\n known, unknown = 1, None\n for index, dim in enumerate(output_shape):\n if dim < 0:\n if unknown is None:\n unknown = index\n else:\n raise ValueError('Can only specify one unknown dimension.')\n else:\n known *= dim\n\n original = np.prod(input_shape, dtype=int)\n if unknown is not None:\n if known == 0 or original % known != 0:\n raise ValueError(msg)\n output_shape[unknown] = original // known\n elif original != known:\n raise ValueError(msg)\n\n return tuple(output_shape)\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0],) + self._fix_unknown_dimension(\n input_shape[1:], self.target_shape)\n\n def call(self, inputs):\n # In case the target shape is not fully defined,\n # we need access to the shape of `inputs`.\n # solution: rely on `K.int_shape`.\n target_shape = self.target_shape\n if -1 in target_shape:\n # Target shape not fully defined.\n input_shape = None\n try:\n input_shape = K.int_shape(inputs)\n except TypeError:\n pass\n if input_shape is not None:\n target_shape = self.compute_output_shape(input_shape)[1:]\n return K.reshape(inputs, (-1,) + target_shape)\n\n def get_config(self):\n config = {'target_shape': self.target_shape}\n base_config = super(Reshape, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Permute(Layer):\n \"\"\"Permutes the dimensions of the input according to a given pattern.\n\n Useful for e.g. connecting RNNs and convnets together.\n\n # Example\n\n ```python\n model = Sequential()\n model.add(Permute((2, 1), input_shape=(10, 64)))\n # now: model.output_shape == (None, 64, 10)\n # note: `None` is the batch dimension\n ```\n\n # Arguments\n dims: Tuple of integers. Permutation pattern, does not include the\n samples dimension. 
Indexing starts at 1.\n For instance, `(2, 1)` permutes the first and second dimension\n of the input.\n\n # Input shape\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n # Output shape\n Same as the input shape, but with the dimensions re-ordered according\n to the specified pattern.\n \"\"\"\n\n def __init__(self, dims, **kwargs):\n super(Permute, self).__init__(**kwargs)\n self.dims = tuple(dims)\n self.input_spec = InputSpec(ndim=len(self.dims) + 1)\n\n def compute_output_shape(self, input_shape):\n input_shape = list(input_shape)\n output_shape = copy.copy(input_shape)\n for i, dim in enumerate(self.dims):\n target_dim = input_shape[dim]\n output_shape[i + 1] = target_dim\n return tuple(output_shape)\n\n def call(self, inputs):\n return K.permute_dimensions(inputs, (0,) + self.dims)\n\n def get_config(self):\n config = {'dims': self.dims}\n base_config = super(Permute, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Flatten(Layer):\n \"\"\"Flattens the input. Does not affect the batch size.\n\n # Example\n\n ```python\n model = Sequential()\n model.add(Conv2D(64, 3, 3,\n border_mode='same',\n input_shape=(3, 32, 32)))\n # now: model.output_shape == (None, 64, 32, 32)\n\n model.add(Flatten())\n # now: model.output_shape == (None, 65536)\n ```\n \"\"\"\n\n def __init__(self, **kwargs):\n super(Flatten, self).__init__(**kwargs)\n self.input_spec = InputSpec(min_ndim=3)\n\n def compute_output_shape(self, input_shape):\n if not all(input_shape[1:]):\n raise ValueError('The shape of the input to \"Flatten\" '\n 'is not fully defined '\n '(got ' + str(input_shape[1:]) + '. '\n 'Make sure to pass a complete \"input_shape\" '\n 'or \"batch_input_shape\" argument to the first '\n 'layer in your model.')\n return (input_shape[0], np.prod(input_shape[1:]))\n\n def call(self, inputs):\n return K.batch_flatten(inputs)\n\n\nclass RepeatVector(Layer):\n \"\"\"Repeats the input n times.\n\n # Example\n\n ```python\n model = Sequential()\n model.add(Dense(32, input_dim=32))\n # now: model.output_shape == (None, 32)\n # note: `None` is the batch dimension\n\n model.add(RepeatVector(3))\n # now: model.output_shape == (None, 3, 32)\n ```\n\n # Arguments\n n: integer, repetition factor.\n\n # Input shape\n 2D tensor of shape `(num_samples, features)`.\n\n # Output shape\n 3D tensor of shape `(num_samples, n, features)`.\n \"\"\"\n\n def __init__(self, n, **kwargs):\n super(RepeatVector, self).__init__(**kwargs)\n self.n = n\n self.input_spec = InputSpec(ndim=2)\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0], self.n, input_shape[1])\n\n def call(self, inputs):\n return K.repeat(inputs, self.n)\n\n def get_config(self):\n config = {'n': self.n}\n base_config = super(RepeatVector, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Lambda(Layer):\n \"\"\"Wraps arbitrary expression as a `Layer` object.\n\n # Examples\n\n ```python\n # add a x -> x^2 layer\n model.add(Lambda(lambda x: x ** 2))\n ```\n ```python\n # add a layer that returns the concatenation\n # of the positive part of the input and\n # the opposite of the negative part\n\n def antirectifier(x):\n x -= K.mean(x, axis=1, keepdims=True)\n x = K.l2_normalize(x, axis=1)\n pos = K.relu(x)\n neg = K.relu(-x)\n return K.concatenate([pos, neg], axis=1)\n\n def antirectifier_output_shape(input_shape):\n shape = 
list(input_shape)\n assert len(shape) == 2 # only valid for 2D tensors\n shape[-1] *= 2\n return tuple(shape)\n\n model.add(Lambda(antirectifier,\n output_shape=antirectifier_output_shape))\n ```\n\n # Arguments\n function: The function to be evaluated.\n Takes input tensor as first argument.\n output_shape: Expected output shape from function.\n Only relevant when using Theano.\n Can be a tuple or function.\n If a tuple, it only specifies the first dimension onward;\n sample dimension is assumed either the same as the input:\n `output_shape = (input_shape[0], ) + output_shape`\n or, the input is `None` and\n the sample dimension is also `None`:\n `output_shape = (None, ) + output_shape`\n If a function, it specifies the entire shape as a function of the\n input shape: `output_shape = f(input_shape)`\n arguments: optional dictionary of keyword arguments to be passed\n to the function.\n\n # Input shape\n Arbitrary. Use the keyword argument input_shape\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n # Output shape\n Specified by `output_shape` argument\n (or auto-inferred when using TensorFlow).\n \"\"\"\n\n @interfaces.legacy_lambda_support\n def __init__(self, function, output_shape=None,\n mask=None, arguments=None, **kwargs):\n super(Lambda, self).__init__(**kwargs)\n self.function = function\n self.arguments = arguments if arguments else {}\n if mask is not None:\n self.supports_masking = True\n self.mask = mask\n\n if output_shape is None:\n self._output_shape = None\n elif isinstance(output_shape, (tuple, list)):\n self._output_shape = tuple(output_shape)\n else:\n if not callable(output_shape):\n raise TypeError('In Lambda, `output_shape` '\n 'must be a list, a tuple, or a function.')\n self._output_shape = output_shape\n\n def compute_output_shape(self, input_shape):\n if self._output_shape is None:\n # With TensorFlow, we can infer the output shape directly:\n if K.backend() == 'tensorflow':\n if isinstance(input_shape, list):\n xs = [K.placeholder(shape=shape) for shape in input_shape]\n x = self.call(xs)\n else:\n x = K.placeholder(shape=input_shape)\n x = self.call(x)\n if isinstance(x, list):\n return [K.int_shape(x_elem) for x_elem in x]\n else:\n return K.int_shape(x)\n # Otherwise, we default to the input shape.\n warnings.warn('`output_shape` argument not specified for layer {} '\n 'and cannot be automatically inferred '\n 'with the Theano backend. '\n 'Defaulting to output shape `{}` '\n '(same as input shape). 
'\n 'If the expected output shape is different, '\n 'specify it via the `output_shape` argument.'\n .format(self.name, input_shape))\n return input_shape\n elif isinstance(self._output_shape, (tuple, list)):\n if isinstance(input_shape, list):\n num_samples = input_shape[0][0]\n else:\n num_samples = input_shape[0] if input_shape else None\n return (num_samples,) + tuple(self._output_shape)\n else:\n shape = self._output_shape(input_shape)\n if not isinstance(shape, (list, tuple)):\n raise ValueError('`output_shape` function must return a tuple or a list of tuples.')\n if isinstance(shape, list):\n if isinstance(shape[0], int) or shape[0] is None:\n shape = tuple(shape)\n return shape\n\n def call(self, inputs, mask=None):\n arguments = self.arguments\n if has_arg(self.function, 'mask'):\n arguments['mask'] = mask\n return self.function(inputs, **arguments)\n\n def compute_mask(self, inputs, mask=None):\n if callable(self.mask):\n return self.mask(inputs, mask)\n return self.mask\n\n def get_config(self):\n if isinstance(self.function, python_types.LambdaType):\n function = func_dump(self.function)\n function_type = 'lambda'\n else:\n function = self.function.__name__\n function_type = 'function'\n\n if isinstance(self._output_shape, python_types.LambdaType):\n output_shape = func_dump(self._output_shape)\n output_shape_type = 'lambda'\n elif callable(self._output_shape):\n output_shape = self._output_shape.__name__\n output_shape_type = 'function'\n else:\n output_shape = self._output_shape\n output_shape_type = 'raw'\n\n config = {'function': function,\n 'function_type': function_type,\n 'output_shape': output_shape,\n 'output_shape_type': output_shape_type,\n 'arguments': self.arguments}\n base_config = super(Lambda, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n globs = globals()\n if custom_objects:\n globs = dict(list(globs.items()) + list(custom_objects.items()))\n function_type = config.pop('function_type')\n if function_type == 'function':\n # Simple lookup in custom objects\n function = deserialize_keras_object(\n config['function'],\n custom_objects=custom_objects,\n printable_module_name='function in Lambda layer')\n elif function_type == 'lambda':\n # Unsafe deserialization from bytecode\n function = func_load(config['function'], globs=globs)\n else:\n raise TypeError('Unknown function type:', function_type)\n\n output_shape_type = config.pop('output_shape_type')\n if output_shape_type == 'function':\n # Simple lookup in custom objects\n output_shape = deserialize_keras_object(\n config['output_shape'],\n custom_objects=custom_objects,\n printable_module_name='output_shape function in Lambda layer')\n elif output_shape_type == 'lambda':\n # Unsafe deserialization from bytecode\n output_shape = func_load(config['output_shape'], globs=globs)\n else:\n output_shape = config['output_shape']\n\n # If arguments were numpy array, they have been saved as\n # list. 
We need to recover the ndarray\n if 'arguments' in config:\n for key in config['arguments']:\n if isinstance(config['arguments'][key], dict):\n arg_dict = config['arguments'][key]\n if 'type' in arg_dict and arg_dict['type'] == 'ndarray':\n # Overwrite the argument with its numpy translation\n config['arguments'][key] = np.array(arg_dict['value'])\n\n config['function'] = function\n config['output_shape'] = output_shape\n return cls(**config)\n\n\nclass Dense(Layer):\n \"\"\"Just your regular densely-connected NN layer.\n\n `Dense` implements the operation:\n `output = activation(dot(input, kernel) + bias)`\n where `activation` is the element-wise activation function\n passed as the `activation` argument, `kernel` is a weights matrix\n created by the layer, and `bias` is a bias vector created by the layer\n (only applicable if `use_bias` is `True`).\n\n Note: if the input to the layer has a rank greater than 2, then\n it is flattened prior to the initial dot product with `kernel`.\n\n # Example\n\n ```python\n # as first layer in a sequential model:\n model = Sequential()\n model.add(Dense(32, input_shape=(16,)))\n # now the model will take as input arrays of shape (*, 16)\n # and output arrays of shape (*, 32)\n\n # after the first layer, you don't need to specify\n # the size of the input anymore:\n model.add(Dense(32))\n ```\n\n # Arguments\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use\n (see [activations](../activations.md)).\n If you don't specify anything, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix\n (see [initializers](../initializers.md)).\n bias_initializer: Initializer for the bias vector\n (see [initializers](../initializers.md)).\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n bias_regularizer: Regularizer function applied to the bias vector\n (see [regularizer](../regularizers.md)).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](../regularizers.md)).\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](../constraints.md)).\n bias_constraint: Constraint function applied to the bias vector\n (see [constraints](../constraints.md)).\n\n # Input shape\n nD tensor with shape: `(batch_size, ..., input_dim)`.\n The most common situation would be\n a 2D input with shape `(batch_size, input_dim)`.\n\n # Output shape\n nD tensor with shape: `(batch_size, ..., units)`.\n For instance, for a 2D input with shape `(batch_size, input_dim)`,\n the output would have shape `(batch_size, units)`.\n \"\"\"\n\n @interfaces.legacy_dense_support\n def __init__(self, units,\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n if 'input_shape' not in kwargs and 'input_dim' in kwargs:\n kwargs['input_shape'] = (kwargs.pop('input_dim'),)\n super(Dense, self).__init__(**kwargs)\n self.units = units\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n 
self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n self.input_spec = InputSpec(min_ndim=2)\n self.supports_masking = True\n\n def build(self, input_shape):\n assert len(input_shape) >= 2\n input_dim = input_shape[-1]\n\n self.kernel = self.add_weight(shape=(input_dim, self.units),\n initializer=self.kernel_initializer,\n name='kernel',\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n if self.use_bias:\n self.bias = self.add_weight(shape=(self.units,),\n initializer=self.bias_initializer,\n name='bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})\n self.built = True\n\n def call(self, inputs):\n output = K.dot(inputs, self.kernel)\n if self.use_bias:\n output = K.bias_add(output, self.bias)\n if self.activation is not None:\n output = self.activation(output)\n return output\n\n def compute_output_shape(self, input_shape):\n assert input_shape and len(input_shape) >= 2\n assert input_shape[-1]\n output_shape = list(input_shape)\n output_shape[-1] = self.units\n return tuple(output_shape)\n\n def get_config(self):\n config = {\n 'units': self.units,\n 'activation': activations.serialize(self.activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer': regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint)\n }\n base_config = super(Dense, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass ActivityRegularization(Layer):\n \"\"\"Layer that applies an update to the cost function based input activity.\n\n # Arguments\n l1: L1 regularization factor (positive float).\n l2: L2 regularization factor (positive float).\n\n # Input shape\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n # Output shape\n Same shape as input.\n \"\"\"\n\n def __init__(self, l1=0., l2=0., **kwargs):\n super(ActivityRegularization, self).__init__(**kwargs)\n self.supports_masking = True\n self.l1 = l1\n self.l2 = l2\n self.activity_regularizer = regularizers.L1L2(l1=l1, l2=l2)\n\n def get_config(self):\n config = {'l1': self.l1,\n 'l2': self.l2}\n base_config = super(ActivityRegularization, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n",
"path": "keras/layers/core.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nfrom __future__ import division\n\nimport numpy as np\n\nimport copy\nimport types as python_types\nimport warnings\n\nfrom .. import backend as K\nfrom .. import activations\nfrom .. import initializers\nfrom .. import regularizers\nfrom .. import constraints\nfrom ..engine import InputSpec\nfrom ..engine import Layer\nfrom ..utils.generic_utils import func_dump\nfrom ..utils.generic_utils import func_load\nfrom ..utils.generic_utils import deserialize_keras_object\nfrom ..utils.generic_utils import has_arg\nfrom ..legacy import interfaces\n\n\nclass Masking(Layer):\n \"\"\"Masks a sequence by using a mask value to skip timesteps.\n\n For each timestep in the input tensor (dimension #1 in the tensor),\n if all values in the input tensor at that timestep\n are equal to `mask_value`, then the timestep will be masked (skipped)\n in all downstream layers (as long as they support masking).\n\n If any downstream layer does not support masking yet receives such\n an input mask, an exception will be raised.\n\n # Example\n\n Consider a Numpy data array `x` of shape `(samples, timesteps, features)`,\n to be fed to a LSTM layer.\n You want to mask timestep #3 and #5 because you lack data for\n these timesteps. You can:\n\n - set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.`\n - insert a `Masking` layer with `mask_value=0.` before the LSTM layer:\n\n ```python\n model = Sequential()\n model.add(Masking(mask_value=0., input_shape=(timesteps, features)))\n model.add(LSTM(32))\n ```\n \"\"\"\n\n def __init__(self, mask_value=0., **kwargs):\n super(Masking, self).__init__(**kwargs)\n self.supports_masking = True\n self.mask_value = mask_value\n\n def compute_mask(self, inputs, mask=None):\n return K.any(K.not_equal(inputs, self.mask_value), axis=-1)\n\n def call(self, inputs):\n boolean_mask = K.any(K.not_equal(inputs, self.mask_value),\n axis=-1, keepdims=True)\n return inputs * K.cast(boolean_mask, inputs.dtype)\n\n def get_config(self):\n config = {'mask_value': self.mask_value}\n base_config = super(Masking, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Dropout(Layer):\n \"\"\"Applies Dropout to the input.\n\n Dropout consists in randomly setting\n a fraction `rate` of input units to 0 at each update during training time,\n which helps prevent overfitting.\n\n # Arguments\n rate: float between 0 and 1. Fraction of the input units to drop.\n noise_shape: 1D integer tensor representing the shape of the\n binary dropout mask that will be multiplied with the input.\n For instance, if your inputs have shape\n `(batch_size, timesteps, features)` and\n you want the dropout mask to be the same for all timesteps,\n you can use `noise_shape=(batch_size, 1, features)`.\n seed: A Python integer to use as random seed.\n\n # References\n - [Dropout: A Simple Way to Prevent Neural Networks from Overfitting](http://www.cs.toronto.edu/~rsalakhu/papers/srivastava14a.pdf)\n \"\"\"\n @interfaces.legacy_dropout_support\n def __init__(self, rate, noise_shape=None, seed=None, **kwargs):\n super(Dropout, self).__init__(**kwargs)\n self.rate = min(1., max(0., rate))\n self.noise_shape = noise_shape\n self.seed = seed\n self.supports_masking = True\n\n def _get_noise_shape(self, _):\n return self.noise_shape\n\n def call(self, inputs, training=None):\n if 0. 
< self.rate < 1.:\n noise_shape = self._get_noise_shape(inputs)\n\n def dropped_inputs():\n return K.dropout(inputs, self.rate, noise_shape,\n seed=self.seed)\n return K.in_train_phase(dropped_inputs, inputs,\n training=training)\n return inputs\n\n def get_config(self):\n config = {'rate': self.rate}\n base_config = super(Dropout, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass SpatialDropout1D(Dropout):\n \"\"\"Spatial 1D version of Dropout.\n\n This version performs the same function as Dropout, however it drops\n entire 1D feature maps instead of individual elements. If adjacent frames\n within feature maps are strongly correlated (as is normally the case in\n early convolution layers) then regular dropout will not regularize the\n activations and will otherwise just result in an effective learning rate\n decrease. In this case, SpatialDropout1D will help promote independence\n between feature maps and should be used instead.\n\n # Arguments\n rate: float between 0 and 1. Fraction of the input units to drop.\n\n # Input shape\n 3D tensor with shape:\n `(samples, timesteps, channels)`\n\n # Output shape\n Same as input\n\n # References\n - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)\n \"\"\"\n\n @interfaces.legacy_spatialdropout1d_support\n def __init__(self, rate, **kwargs):\n super(SpatialDropout1D, self).__init__(rate, **kwargs)\n self.input_spec = InputSpec(ndim=3)\n\n def _get_noise_shape(self, inputs):\n input_shape = K.shape(inputs)\n noise_shape = (input_shape[0], 1, input_shape[2])\n return noise_shape\n\n\nclass SpatialDropout2D(Dropout):\n \"\"\"Spatial 2D version of Dropout.\n\n This version performs the same function as Dropout, however it drops\n entire 2D feature maps instead of individual elements. If adjacent pixels\n within feature maps are strongly correlated (as is normally the case in\n early convolution layers) then regular dropout will not regularize the\n activations and will otherwise just result in an effective learning rate\n decrease. In this case, SpatialDropout2D will help promote independence\n between feature maps and should be used instead.\n\n # Arguments\n rate: float between 0 and 1. 
Fraction of the input units to drop.\n data_format: 'channels_first' or 'channels_last'.\n In 'channels_first' mode, the channels dimension\n (the depth) is at index 1,\n in 'channels_last' mode is it at index 3.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n # Input shape\n 4D tensor with shape:\n `(samples, channels, rows, cols)` if data_format='channels_first'\n or 4D tensor with shape:\n `(samples, rows, cols, channels)` if data_format='channels_last'.\n\n # Output shape\n Same as input\n\n # References\n - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)\n \"\"\"\n\n @interfaces.legacy_spatialdropoutNd_support\n def __init__(self, rate, data_format=None, **kwargs):\n super(SpatialDropout2D, self).__init__(rate, **kwargs)\n if data_format is None:\n data_format = K.image_data_format()\n if data_format not in {'channels_last', 'channels_first'}:\n raise ValueError('`data_format` must be in '\n '{`\"channels_last\"`, `\"channels_first\"`}')\n self.data_format = data_format\n self.input_spec = InputSpec(ndim=4)\n\n def _get_noise_shape(self, inputs):\n input_shape = K.shape(inputs)\n if self.data_format == 'channels_first':\n noise_shape = (input_shape[0], input_shape[1], 1, 1)\n else:\n noise_shape = (input_shape[0], 1, 1, input_shape[3])\n return noise_shape\n\n\nclass SpatialDropout3D(Dropout):\n \"\"\"Spatial 3D version of Dropout.\n\n This version performs the same function as Dropout, however it drops\n entire 3D feature maps instead of individual elements. If adjacent voxels\n within feature maps are strongly correlated (as is normally the case in\n early convolution layers) then regular dropout will not regularize the\n activations and will otherwise just result in an effective learning rate\n decrease. In this case, SpatialDropout3D will help promote independence\n between feature maps and should be used instead.\n\n # Arguments\n rate: float between 0 and 1. 
Fraction of the input units to drop.\n data_format: 'channels_first' or 'channels_last'.\n In 'channels_first' mode, the channels dimension (the depth)\n is at index 1, in 'channels_last' mode is it at index 4.\n It defaults to the `image_data_format` value found in your\n Keras config file at `~/.keras/keras.json`.\n If you never set it, then it will be \"channels_last\".\n\n # Input shape\n 5D tensor with shape:\n `(samples, channels, dim1, dim2, dim3)` if data_format='channels_first'\n or 5D tensor with shape:\n `(samples, dim1, dim2, dim3, channels)` if data_format='channels_last'.\n\n # Output shape\n Same as input\n\n # References\n - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280)\n \"\"\"\n\n @interfaces.legacy_spatialdropoutNd_support\n def __init__(self, rate, data_format=None, **kwargs):\n super(SpatialDropout3D, self).__init__(rate, **kwargs)\n if data_format is None:\n data_format = K.image_data_format()\n if data_format not in {'channels_last', 'channels_first'}:\n raise ValueError('`data_format` must be in '\n '{`\"channels_last\"`, `\"channels_first\"`}')\n self.data_format = data_format\n self.input_spec = InputSpec(ndim=5)\n\n def _get_noise_shape(self, inputs):\n input_shape = K.shape(inputs)\n if self.data_format == 'channels_first':\n noise_shape = (input_shape[0], input_shape[1], 1, 1, 1)\n else:\n noise_shape = (input_shape[0], 1, 1, 1, input_shape[4])\n return noise_shape\n\n\nclass Activation(Layer):\n \"\"\"Applies an activation function to an output.\n\n # Arguments\n activation: name of activation function to use\n (see: [activations](../activations.md)),\n or alternatively, a Theano or TensorFlow operation.\n\n # Input shape\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n # Output shape\n Same shape as input.\n \"\"\"\n\n def __init__(self, activation, **kwargs):\n super(Activation, self).__init__(**kwargs)\n self.supports_masking = True\n self.activation = activations.get(activation)\n\n def call(self, inputs):\n return self.activation(inputs)\n\n def get_config(self):\n config = {'activation': activations.serialize(self.activation)}\n base_config = super(Activation, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Reshape(Layer):\n \"\"\"Reshapes an output to a certain shape.\n\n # Arguments\n target_shape: target shape. 
Tuple of integers.\n Does not include the batch axis.\n\n # Input shape\n Arbitrary, although all dimensions in the input shaped must be fixed.\n Use the keyword argument `input_shape`\n (tuple of integers, does not include the batch axis)\n when using this layer as the first layer in a model.\n\n # Output shape\n `(batch_size,) + target_shape`\n\n # Example\n\n ```python\n # as first layer in a Sequential model\n model = Sequential()\n model.add(Reshape((3, 4), input_shape=(12,)))\n # now: model.output_shape == (None, 3, 4)\n # note: `None` is the batch dimension\n\n # as intermediate layer in a Sequential model\n model.add(Reshape((6, 2)))\n # now: model.output_shape == (None, 6, 2)\n\n # also supports shape inference using `-1` as dimension\n model.add(Reshape((-1, 2, 2)))\n # now: model.output_shape == (None, 3, 2, 2)\n ```\n \"\"\"\n\n def __init__(self, target_shape, **kwargs):\n super(Reshape, self).__init__(**kwargs)\n self.target_shape = tuple(target_shape)\n\n def _fix_unknown_dimension(self, input_shape, output_shape):\n \"\"\"Finds and replaces a missing dimension in an output shape.\n\n This is a near direct port of the internal Numpy function\n `_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c`\n\n # Arguments\n input_shape: original shape of array being reshaped\n output_shape: target shape of the array, with at most\n a single -1 which indicates a dimension that should be\n derived from the input shape.\n\n # Returns\n The new output shape with a `-1` replaced with its computed value.\n\n # Raises\n ValueError: if `input_shape` and `output_shape` do not match.\n \"\"\"\n output_shape = list(output_shape)\n msg = 'total size of new array must be unchanged'\n\n known, unknown = 1, None\n for index, dim in enumerate(output_shape):\n if dim < 0:\n if unknown is None:\n unknown = index\n else:\n raise ValueError('Can only specify one unknown dimension.')\n else:\n known *= dim\n\n original = np.prod(input_shape, dtype=int)\n if unknown is not None:\n if known == 0 or original % known != 0:\n raise ValueError(msg)\n output_shape[unknown] = original // known\n elif original != known:\n raise ValueError(msg)\n\n return tuple(output_shape)\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0],) + self._fix_unknown_dimension(\n input_shape[1:], self.target_shape)\n\n def call(self, inputs):\n # In case the target shape is not fully defined,\n # we need access to the shape of `inputs`.\n # solution: rely on `K.int_shape`.\n target_shape = self.target_shape\n if -1 in target_shape:\n # Target shape not fully defined.\n input_shape = None\n try:\n input_shape = K.int_shape(inputs)\n except TypeError:\n pass\n if input_shape is not None:\n target_shape = self.compute_output_shape(input_shape)[1:]\n return K.reshape(inputs, (-1,) + target_shape)\n\n def get_config(self):\n config = {'target_shape': self.target_shape}\n base_config = super(Reshape, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Permute(Layer):\n \"\"\"Permutes the dimensions of the input according to a given pattern.\n\n Useful for e.g. connecting RNNs and convnets together.\n\n # Example\n\n ```python\n model = Sequential()\n model.add(Permute((2, 1), input_shape=(10, 64)))\n # now: model.output_shape == (None, 64, 10)\n # note: `None` is the batch dimension\n ```\n\n # Arguments\n dims: Tuple of integers. Permutation pattern, does not include the\n samples dimension. 
Indexing starts at 1.\n For instance, `(2, 1)` permutes the first and second dimension\n of the input.\n\n # Input shape\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n # Output shape\n Same as the input shape, but with the dimensions re-ordered according\n to the specified pattern.\n \"\"\"\n\n def __init__(self, dims, **kwargs):\n super(Permute, self).__init__(**kwargs)\n self.dims = tuple(dims)\n self.input_spec = InputSpec(ndim=len(self.dims) + 1)\n\n def compute_output_shape(self, input_shape):\n input_shape = list(input_shape)\n output_shape = copy.copy(input_shape)\n for i, dim in enumerate(self.dims):\n target_dim = input_shape[dim]\n output_shape[i + 1] = target_dim\n return tuple(output_shape)\n\n def call(self, inputs):\n return K.permute_dimensions(inputs, (0,) + self.dims)\n\n def get_config(self):\n config = {'dims': self.dims}\n base_config = super(Permute, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Flatten(Layer):\n \"\"\"Flattens the input. Does not affect the batch size.\n\n # Example\n\n ```python\n model = Sequential()\n model.add(Conv2D(64, 3, 3,\n border_mode='same',\n input_shape=(3, 32, 32)))\n # now: model.output_shape == (None, 64, 32, 32)\n\n model.add(Flatten())\n # now: model.output_shape == (None, 65536)\n ```\n \"\"\"\n\n def __init__(self, **kwargs):\n super(Flatten, self).__init__(**kwargs)\n self.input_spec = InputSpec(min_ndim=3)\n\n def compute_output_shape(self, input_shape):\n if not all(input_shape[1:]):\n raise ValueError('The shape of the input to \"Flatten\" '\n 'is not fully defined '\n '(got ' + str(input_shape[1:]) + '. '\n 'Make sure to pass a complete \"input_shape\" '\n 'or \"batch_input_shape\" argument to the first '\n 'layer in your model.')\n return (input_shape[0], np.prod(input_shape[1:]))\n\n def call(self, inputs):\n return K.batch_flatten(inputs)\n\n\nclass RepeatVector(Layer):\n \"\"\"Repeats the input n times.\n\n # Example\n\n ```python\n model = Sequential()\n model.add(Dense(32, input_dim=32))\n # now: model.output_shape == (None, 32)\n # note: `None` is the batch dimension\n\n model.add(RepeatVector(3))\n # now: model.output_shape == (None, 3, 32)\n ```\n\n # Arguments\n n: integer, repetition factor.\n\n # Input shape\n 2D tensor of shape `(num_samples, features)`.\n\n # Output shape\n 3D tensor of shape `(num_samples, n, features)`.\n \"\"\"\n\n def __init__(self, n, **kwargs):\n super(RepeatVector, self).__init__(**kwargs)\n self.n = n\n self.input_spec = InputSpec(ndim=2)\n\n def compute_output_shape(self, input_shape):\n return (input_shape[0], self.n, input_shape[1])\n\n def call(self, inputs):\n return K.repeat(inputs, self.n)\n\n def get_config(self):\n config = {'n': self.n}\n base_config = super(RepeatVector, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass Lambda(Layer):\n \"\"\"Wraps arbitrary expression as a `Layer` object.\n\n # Examples\n\n ```python\n # add a x -> x^2 layer\n model.add(Lambda(lambda x: x ** 2))\n ```\n ```python\n # add a layer that returns the concatenation\n # of the positive part of the input and\n # the opposite of the negative part\n\n def antirectifier(x):\n x -= K.mean(x, axis=1, keepdims=True)\n x = K.l2_normalize(x, axis=1)\n pos = K.relu(x)\n neg = K.relu(-x)\n return K.concatenate([pos, neg], axis=1)\n\n def antirectifier_output_shape(input_shape):\n shape = 
list(input_shape)\n assert len(shape) == 2 # only valid for 2D tensors\n shape[-1] *= 2\n return tuple(shape)\n\n model.add(Lambda(antirectifier,\n output_shape=antirectifier_output_shape))\n ```\n\n # Arguments\n function: The function to be evaluated.\n Takes input tensor as first argument.\n output_shape: Expected output shape from function.\n Only relevant when using Theano.\n Can be a tuple or function.\n If a tuple, it only specifies the first dimension onward;\n sample dimension is assumed either the same as the input:\n `output_shape = (input_shape[0], ) + output_shape`\n or, the input is `None` and\n the sample dimension is also `None`:\n `output_shape = (None, ) + output_shape`\n If a function, it specifies the entire shape as a function of the\n input shape: `output_shape = f(input_shape)`\n arguments: optional dictionary of keyword arguments to be passed\n to the function.\n\n # Input shape\n Arbitrary. Use the keyword argument input_shape\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n # Output shape\n Specified by `output_shape` argument\n (or auto-inferred when using TensorFlow).\n \"\"\"\n\n @interfaces.legacy_lambda_support\n def __init__(self, function, output_shape=None,\n mask=None, arguments=None, **kwargs):\n super(Lambda, self).__init__(**kwargs)\n self.function = function\n self.arguments = arguments if arguments else {}\n if mask is not None:\n self.supports_masking = True\n self.mask = mask\n\n if output_shape is None:\n self._output_shape = None\n elif isinstance(output_shape, (tuple, list)):\n self._output_shape = tuple(output_shape)\n else:\n if not callable(output_shape):\n raise TypeError('In Lambda, `output_shape` '\n 'must be a list, a tuple, or a function.')\n self._output_shape = output_shape\n\n def compute_output_shape(self, input_shape):\n if self._output_shape is None:\n # With TensorFlow, we can infer the output shape directly:\n if K.backend() == 'tensorflow':\n if isinstance(input_shape, list):\n xs = [K.placeholder(shape=shape) for shape in input_shape]\n x = self.call(xs)\n else:\n x = K.placeholder(shape=input_shape)\n x = self.call(x)\n if isinstance(x, list):\n return [K.int_shape(x_elem) for x_elem in x]\n else:\n return K.int_shape(x)\n # Otherwise, we default to the input shape.\n warnings.warn('`output_shape` argument not specified for layer {} '\n 'and cannot be automatically inferred '\n 'with the Theano backend. '\n 'Defaulting to output shape `{}` '\n '(same as input shape). 
'\n 'If the expected output shape is different, '\n 'specify it via the `output_shape` argument.'\n .format(self.name, input_shape))\n return input_shape\n elif isinstance(self._output_shape, (tuple, list)):\n if isinstance(input_shape, list):\n num_samples = input_shape[0][0]\n else:\n num_samples = input_shape[0] if input_shape else None\n return (num_samples,) + tuple(self._output_shape)\n else:\n shape = self._output_shape(input_shape)\n if not isinstance(shape, (list, tuple)):\n raise ValueError('`output_shape` function must return a tuple or a list of tuples.')\n if isinstance(shape, list):\n if isinstance(shape[0], int) or shape[0] is None:\n shape = tuple(shape)\n return shape\n\n def call(self, inputs, mask=None):\n arguments = self.arguments\n if has_arg(self.function, 'mask'):\n arguments['mask'] = mask\n return self.function(inputs, **arguments)\n\n def compute_mask(self, inputs, mask=None):\n if callable(self.mask):\n return self.mask(inputs, mask)\n return self.mask\n\n def get_config(self):\n if isinstance(self.function, python_types.LambdaType):\n function = func_dump(self.function)\n function_type = 'lambda'\n else:\n function = self.function.__name__\n function_type = 'function'\n\n if isinstance(self._output_shape, python_types.LambdaType):\n output_shape = func_dump(self._output_shape)\n output_shape_type = 'lambda'\n elif callable(self._output_shape):\n output_shape = self._output_shape.__name__\n output_shape_type = 'function'\n else:\n output_shape = self._output_shape\n output_shape_type = 'raw'\n\n config = {'function': function,\n 'function_type': function_type,\n 'output_shape': output_shape,\n 'output_shape_type': output_shape_type,\n 'arguments': self.arguments}\n base_config = super(Lambda, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n @classmethod\n def from_config(cls, config, custom_objects=None):\n globs = globals()\n if custom_objects:\n globs = dict(list(globs.items()) + list(custom_objects.items()))\n function_type = config.pop('function_type')\n if function_type == 'function':\n # Simple lookup in custom objects\n function = deserialize_keras_object(\n config['function'],\n custom_objects=custom_objects,\n printable_module_name='function in Lambda layer')\n elif function_type == 'lambda':\n # Unsafe deserialization from bytecode\n function = func_load(config['function'], globs=globs)\n else:\n raise TypeError('Unknown function type:', function_type)\n\n output_shape_type = config.pop('output_shape_type')\n if output_shape_type == 'function':\n # Simple lookup in custom objects\n output_shape = deserialize_keras_object(\n config['output_shape'],\n custom_objects=custom_objects,\n printable_module_name='output_shape function in Lambda layer')\n elif output_shape_type == 'lambda':\n # Unsafe deserialization from bytecode\n output_shape = func_load(config['output_shape'], globs=globs)\n else:\n output_shape = config['output_shape']\n\n # If arguments were numpy array, they have been saved as\n # list. 
We need to recover the ndarray\n if 'arguments' in config:\n for key in config['arguments']:\n if isinstance(config['arguments'][key], dict):\n arg_dict = config['arguments'][key]\n if 'type' in arg_dict and arg_dict['type'] == 'ndarray':\n # Overwrite the argument with its numpy translation\n config['arguments'][key] = np.array(arg_dict['value'])\n\n config['function'] = function\n config['output_shape'] = output_shape\n return cls(**config)\n\n\nclass Dense(Layer):\n \"\"\"Just your regular densely-connected NN layer.\n\n `Dense` implements the operation:\n `output = activation(dot(input, kernel) + bias)`\n where `activation` is the element-wise activation function\n passed as the `activation` argument, `kernel` is a weights matrix\n created by the layer, and `bias` is a bias vector created by the layer\n (only applicable if `use_bias` is `True`).\n\n Note: if the input to the layer has a rank greater than 2, then\n it is flattened prior to the initial dot product with `kernel`.\n\n # Example\n\n ```python\n # as first layer in a sequential model:\n model = Sequential()\n model.add(Dense(32, input_shape=(16,)))\n # now the model will take as input arrays of shape (*, 16)\n # and output arrays of shape (*, 32)\n\n # after the first layer, you don't need to specify\n # the size of the input anymore:\n model.add(Dense(32))\n ```\n\n # Arguments\n units: Positive integer, dimensionality of the output space.\n activation: Activation function to use\n (see [activations](../activations.md)).\n If you don't specify anything, no activation is applied\n (ie. \"linear\" activation: `a(x) = x`).\n use_bias: Boolean, whether the layer uses a bias vector.\n kernel_initializer: Initializer for the `kernel` weights matrix\n (see [initializers](../initializers.md)).\n bias_initializer: Initializer for the bias vector\n (see [initializers](../initializers.md)).\n kernel_regularizer: Regularizer function applied to\n the `kernel` weights matrix\n (see [regularizer](../regularizers.md)).\n bias_regularizer: Regularizer function applied to the bias vector\n (see [regularizer](../regularizers.md)).\n activity_regularizer: Regularizer function applied to\n the output of the layer (its \"activation\").\n (see [regularizer](../regularizers.md)).\n kernel_constraint: Constraint function applied to\n the `kernel` weights matrix\n (see [constraints](../constraints.md)).\n bias_constraint: Constraint function applied to the bias vector\n (see [constraints](../constraints.md)).\n\n # Input shape\n nD tensor with shape: `(batch_size, ..., input_dim)`.\n The most common situation would be\n a 2D input with shape `(batch_size, input_dim)`.\n\n # Output shape\n nD tensor with shape: `(batch_size, ..., units)`.\n For instance, for a 2D input with shape `(batch_size, input_dim)`,\n the output would have shape `(batch_size, units)`.\n \"\"\"\n\n @interfaces.legacy_dense_support\n def __init__(self, units,\n activation=None,\n use_bias=True,\n kernel_initializer='glorot_uniform',\n bias_initializer='zeros',\n kernel_regularizer=None,\n bias_regularizer=None,\n activity_regularizer=None,\n kernel_constraint=None,\n bias_constraint=None,\n **kwargs):\n if 'input_shape' not in kwargs and 'input_dim' in kwargs:\n kwargs['input_shape'] = (kwargs.pop('input_dim'),)\n super(Dense, self).__init__(**kwargs)\n self.units = units\n self.activation = activations.get(activation)\n self.use_bias = use_bias\n self.kernel_initializer = initializers.get(kernel_initializer)\n self.bias_initializer = initializers.get(bias_initializer)\n 
self.kernel_regularizer = regularizers.get(kernel_regularizer)\n self.bias_regularizer = regularizers.get(bias_regularizer)\n self.activity_regularizer = regularizers.get(activity_regularizer)\n self.kernel_constraint = constraints.get(kernel_constraint)\n self.bias_constraint = constraints.get(bias_constraint)\n self.input_spec = InputSpec(min_ndim=2)\n self.supports_masking = True\n\n def build(self, input_shape):\n assert len(input_shape) >= 2\n input_dim = input_shape[-1]\n\n self.kernel = self.add_weight(shape=(input_dim, self.units),\n initializer=self.kernel_initializer,\n name='kernel',\n regularizer=self.kernel_regularizer,\n constraint=self.kernel_constraint)\n if self.use_bias:\n self.bias = self.add_weight(shape=(self.units,),\n initializer=self.bias_initializer,\n name='bias',\n regularizer=self.bias_regularizer,\n constraint=self.bias_constraint)\n else:\n self.bias = None\n self.input_spec = InputSpec(min_ndim=2, axes={-1: input_dim})\n self.built = True\n\n def call(self, inputs):\n output = K.dot(inputs, self.kernel)\n if self.use_bias:\n output = K.bias_add(output, self.bias)\n if self.activation is not None:\n output = self.activation(output)\n return output\n\n def compute_output_shape(self, input_shape):\n assert input_shape and len(input_shape) >= 2\n assert input_shape[-1]\n output_shape = list(input_shape)\n output_shape[-1] = self.units\n return tuple(output_shape)\n\n def get_config(self):\n config = {\n 'units': self.units,\n 'activation': activations.serialize(self.activation),\n 'use_bias': self.use_bias,\n 'kernel_initializer': initializers.serialize(self.kernel_initializer),\n 'bias_initializer': initializers.serialize(self.bias_initializer),\n 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer),\n 'bias_regularizer': regularizers.serialize(self.bias_regularizer),\n 'activity_regularizer': regularizers.serialize(self.activity_regularizer),\n 'kernel_constraint': constraints.serialize(self.kernel_constraint),\n 'bias_constraint': constraints.serialize(self.bias_constraint)\n }\n base_config = super(Dense, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\nclass ActivityRegularization(Layer):\n \"\"\"Layer that applies an update to the cost function based input activity.\n\n # Arguments\n l1: L1 regularization factor (positive float).\n l2: L2 regularization factor (positive float).\n\n # Input shape\n Arbitrary. Use the keyword argument `input_shape`\n (tuple of integers, does not include the samples axis)\n when using this layer as the first layer in a model.\n\n # Output shape\n Same shape as input.\n \"\"\"\n\n def __init__(self, l1=0., l2=0., **kwargs):\n super(ActivityRegularization, self).__init__(**kwargs)\n self.supports_masking = True\n self.l1 = l1\n self.l2 = l2\n self.activity_regularizer = regularizers.L1L2(l1=l1, l2=l2)\n\n def get_config(self):\n config = {'l1': self.l1,\n 'l2': self.l2}\n base_config = super(ActivityRegularization, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n",
"path": "keras/layers/core.py"
}
] | diff --git a/keras/layers/core.py b/keras/layers/core.py
index a89ed39f79a8..557124b5465d 100644
--- a/keras/layers/core.py
+++ b/keras/layers/core.py
@@ -61,7 +61,7 @@ def compute_mask(self, inputs, mask=None):
def call(self, inputs):
boolean_mask = K.any(K.not_equal(inputs, self.mask_value),
axis=-1, keepdims=True)
- return inputs * K.cast(boolean_mask, K.floatx())
+ return inputs * K.cast(boolean_mask, inputs.dtype)
def get_config(self):
config = {'mask_value': self.mask_value}
| Masking a layer that has an integer dtype raises an error in TensorFlow but not Theano.
The following:
```python
from keras.layers import Input, Masking
document = Input(shape = (10, ), dtype = "int32")
mask = Masking(mask_value = 21)
document_mask = mask(document)
```
produces this error:
```
----> 5 document_mask = mask(document)
/home/airalcorn2/.pyenv/versions/3.5.2/lib/python3.5/site-packages/keras/engine/topology.py in __call__(self, inputs, **kwargs)
594
595 # Actually call the layer, collecting output(s), mask(s), and shape(s).
--> 596 output = self.call(inputs, **kwargs)
597 output_mask = self.compute_mask(inputs, previous_mask)
598
/home/airalcorn2/.pyenv/versions/3.5.2/lib/python3.5/site-packages/keras/layers/core.py in call(self, inputs)
62 boolean_mask = K.any(K.not_equal(inputs, self.mask_value),
63 axis=-1, keepdims=True)
---> 64 return inputs * K.cast(boolean_mask, K.floatx())
65
66 def get_config(self):
/home/airalcorn2/.pyenv/versions/3.5.2/lib/python3.5/site-packages/tensorflow/python/ops/math_ops.py in binary_op_wrapper(x, y)
827 if not isinstance(y, sparse_tensor.SparseTensor):
828 try:
--> 829 y = ops.convert_to_tensor(y, dtype=x.dtype.base_dtype, name="y")
830 except TypeError:
831 # If the RHS is not a tensor, it might be a tensor aware object
/home/airalcorn2/.pyenv/versions/3.5.2/lib/python3.5/site-packages/tensorflow/python/framework/ops.py in convert_to_tensor(value, dtype, name, preferred_dtype)
674 name=name,
675 preferred_dtype=preferred_dtype,
--> 676 as_ref=False)
677
678
/home/airalcorn2/.pyenv/versions/3.5.2/lib/python3.5/site-packages/tensorflow/python/framework/ops.py in internal_convert_to_tensor(value, dtype, name, as_ref, preferred_dtype)
739
740 if ret is None:
--> 741 ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref)
742
743 if ret is NotImplemented:
/home/airalcorn2/.pyenv/versions/3.5.2/lib/python3.5/site-packages/tensorflow/python/framework/ops.py in _TensorTensorConversionFunction(t, dtype, name, as_ref)
612 raise ValueError(
613 "Tensor conversion requested dtype %s for Tensor with dtype %s: %r"
--> 614 % (dtype.name, t.dtype.name, str(t)))
615 return t
616
ValueError: Tensor conversion requested dtype int32 for Tensor with dtype float32: 'Tensor("masking_1/Cast_1:0", shape=(?, 1), dtype=float32)'
```
when using TensorFlow as the backend, but works fine with Theano. The issue seems to be that [Keras casts the mask to a float](https://github.com/fchollet/keras/blob/master/keras/layers/core.py#L64), even when the inputs are not floats themselves. Changing the return value to:
```python
inputs * K.cast(boolean_mask, inputs.dtype)
```
fixes the issue.
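For completeness, a minimal, self-contained sketch of the two casts side by side (assuming Keras 2.x with the TensorFlow backend; the `mask_value` and placeholder shape are only illustrative, not taken from the report):
```python
from keras import backend as K

mask_value = 21
inputs = K.placeholder(shape=(None, 10), dtype='int32')

boolean_mask = K.any(K.not_equal(inputs, mask_value),
                     axis=-1, keepdims=True)

# Casting to K.floatx() yields a float32 tensor, so `inputs * mask` mixes
# int32 and float32 and TensorFlow's convert_to_tensor rejects it.
# Casting to the input's own dtype keeps both operands int32:
masked = inputs * K.cast(boolean_mask, inputs.dtype)
```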
|
enthought__chaco-731 | [
{
"content": "\"\"\" Defines the ArrayDataSource class.\"\"\"\n\n# Major library imports\nfrom numpy import array, empty, isfinite, ones, ndarray\nimport numpy as np\n\n# Enthought library imports\nfrom traits.api import Any, Constant, Int, Tuple\n\n# Chaco imports\nfrom .base import NumericalSequenceTrait, reverse_map_1d, SortOrderTrait\nfrom .abstract_data_source import AbstractDataSource\n\n\ndef bounded_nanargmin(arr):\n \"\"\"Find the index of the minimum value, ignoring NaNs.\n\n If all NaNs, return 0.\n \"\"\"\n # Different versions of numpy behave differently in the all-NaN case, so we\n # catch this condition in two different ways.\n try:\n if np.issubdtype(arr.dtype, np.floating):\n min = np.nanargmin(arr)\n elif np.issubdtype(arr.dtype, np.number):\n min = np.argmin(arr)\n else:\n min = 0\n except ValueError:\n return 0\n if isfinite(min):\n return min\n else:\n return 0\n\n\ndef bounded_nanargmax(arr):\n \"\"\"Find the index of the maximum value, ignoring NaNs.\n\n If all NaNs, return -1.\n \"\"\"\n try:\n if np.issubdtype(arr.dtype, np.floating):\n max = np.nanargmax(arr)\n elif np.issubdtype(arr.dtype, np.number):\n max = np.argmax(arr)\n else:\n max = -1\n except ValueError:\n return -1\n if isfinite(max):\n return max\n else:\n return -1\n\n\nclass ArrayDataSource(AbstractDataSource):\n \"\"\"A data source representing a single, continuous array of numerical data.\n\n This class does not listen to the array for value changes; if you need that\n behavior, create a subclass that hooks up the appropriate listeners.\n \"\"\"\n\n # ------------------------------------------------------------------------\n # AbstractDataSource traits\n # ------------------------------------------------------------------------\n\n #: The dimensionality of the indices into this data source (overrides\n #: AbstractDataSource).\n index_dimension = Constant(\"scalar\")\n\n #: The dimensionality of the value at each index point (overrides\n #: AbstractDataSource).\n value_dimension = Constant(\"scalar\")\n\n #: The sort order of the data.\n #: This is a specialized optimization for 1-D arrays, but it's an important\n #: one that's used everywhere.\n sort_order = SortOrderTrait\n\n # ------------------------------------------------------------------------\n # Private traits\n # ------------------------------------------------------------------------\n\n # The data array itself.\n _data = NumericalSequenceTrait\n\n # Cached values of min and max as long as **_data** doesn't change.\n _cached_bounds = Tuple\n\n # Not necessary, since this is not a filter, but provided for convenience.\n _cached_mask = Any\n\n # The index of the (first) minimum value in self._data\n # FIXME: This is an Any instead of an Int trait because of how Traits\n # typechecks numpy.int64 on 64-bit Windows systems.\n _min_index = Any\n\n # The index of the (first) maximum value in self._data\n # FIXME: This is an Any instead of an Int trait because of how Traits\n # typechecks numpy.int64 on 64-bit Windows systems.\n _max_index = Any\n\n # ------------------------------------------------------------------------\n # Public methods\n # ------------------------------------------------------------------------\n\n def __init__(self, data=array([]), sort_order=\"none\", **kw):\n AbstractDataSource.__init__(self, **kw)\n self.set_data(data, sort_order)\n\n def set_data(self, newdata, sort_order=None):\n \"\"\"Sets the data, and optionally the sort order, for this data source.\n\n Parameters\n ----------\n newdata : array\n The data to use.\n 
sort_order : SortOrderTrait\n The sort order of the data\n \"\"\"\n self._data = newdata\n if sort_order is not None:\n self.sort_order = sort_order\n self._compute_bounds()\n self.data_changed = True\n\n def set_mask(self, mask):\n \"\"\"Sets the mask for this data source.\"\"\"\n self._cached_mask = mask\n self.data_changed = True\n\n def remove_mask(self):\n \"\"\"Removes the mask on this data source.\"\"\"\n self._cached_mask = None\n self.data_changed = True\n\n # ------------------------------------------------------------------------\n # AbstractDataSource interface\n # ------------------------------------------------------------------------\n\n def get_data(self):\n \"\"\"Returns the data for this data source, or 0.0 if it has no data.\n\n Implements AbstractDataSource.\n \"\"\"\n if self._data is not None:\n return self._data\n else:\n return empty(shape=(0,))\n\n def get_data_mask(self):\n \"\"\"get_data_mask() -> (data_array, mask_array)\n\n Implements AbstractDataSource.\n \"\"\"\n if self._cached_mask is None:\n return self._data, ones(len(self._data), dtype=bool)\n else:\n return self._data, self._cached_mask\n\n def is_masked(self):\n \"\"\"is_masked() -> bool\n\n Implements AbstractDataSource.\n \"\"\"\n if self._cached_mask is not None:\n return True\n else:\n return False\n\n def get_size(self):\n \"\"\"get_size() -> int\n\n Implements AbstractDataSource.\n \"\"\"\n if self._data is not None:\n return len(self._data)\n else:\n return 0\n\n def get_bounds(self):\n \"\"\"Returns the minimum and maximum values of the data source's data.\n\n Implements AbstractDataSource.\n \"\"\"\n if (\n self._cached_bounds is None\n or self._cached_bounds == ()\n or self._cached_bounds == 0.0\n ):\n self._compute_bounds()\n return self._cached_bounds\n\n def reverse_map(self, pt, index=0, outside_returns_none=True):\n \"\"\"Returns the index of *pt* in the data source.\n\n Parameters\n ----------\n pt : scalar value\n value to find\n index\n ignored for data series with 1-D indices\n outside_returns_none : Boolean\n Whether the method returns None if *pt* is outside the range of\n the data source; if False, the method returns the value of the\n bound that *pt* is outside of.\n \"\"\"\n if self.sort_order == \"none\":\n raise NotImplementedError\n\n # index is ignored for dataseries with 1-dimensional indices\n minval, maxval = self._cached_bounds\n if pt < minval:\n if outside_returns_none:\n return None\n else:\n return self._min_index\n elif pt > maxval:\n if outside_returns_none:\n return None\n else:\n return self._max_index\n else:\n return reverse_map_1d(self._data, pt, self.sort_order)\n\n # ------------------------------------------------------------------------\n # Private methods\n # ------------------------------------------------------------------------\n\n def _compute_bounds(self, data=None):\n \"\"\"Computes the minimum and maximum values of self._data.\n\n If a data array is passed in, then that is used instead of self._data.\n This behavior is useful for subclasses.\n \"\"\"\n # TODO: as an optimization, perhaps create and cache a sorted\n # version of the dataset?\n\n if data is None:\n data = self.get_data()\n\n data_len = len(data)\n\n if data_len == 0:\n self._min_index = 0\n self._max_index = 0\n self._cached_bounds = (0.0, 0.0)\n elif data_len == 1:\n self._min_index = 0\n self._max_index = 0\n self._cached_bounds = (data[0], data[0])\n else:\n if self.sort_order == \"ascending\":\n self._min_index = 0\n self._max_index = -1\n elif self.sort_order == 
\"descending\":\n self._min_index = -1\n self._max_index = 0\n else:\n # ignore NaN values. This is probably a little slower,\n # but also much safer.\n\n # data might be an array of strings or objects that\n # can't have argmin calculated on them.\n try:\n # the data may be in a subclass of numpy.array, viewing\n # the data as a ndarray will remove side effects of\n # the subclasses, such as different operator behaviors\n self._min_index = bounded_nanargmin(data.view(ndarray))\n self._max_index = bounded_nanargmax(data.view(ndarray))\n except (TypeError, IndexError, NotImplementedError):\n # For strings and objects, we punt... These show up in\n # label-ish data sources.\n self._cached_bounds = (0.0, 0.0)\n\n self._cached_bounds = (\n data[self._min_index],\n data[self._max_index],\n )\n\n # ------------------------------------------------------------------------\n # Event handlers\n # ------------------------------------------------------------------------\n\n def _metadata_changed(self, event):\n self.metadata_changed = True\n\n def _metadata_items_changed(self, event):\n self.metadata_changed = True\n\n # ------------------------------------------------------------------------\n # Persistence-related methods\n # ------------------------------------------------------------------------\n\n def __getstate__(self):\n state = super().__getstate__()\n if not self.persist_data:\n state.pop(\"_data\", None)\n state.pop(\"_cached_mask\", None)\n state.pop(\"_cached_bounds\", None)\n state.pop(\"_min_index\", None)\n state.pop(\"_max_index\", None)\n return state\n\n def _post_load(self):\n super()._post_load()\n self._cached_bounds = ()\n self._cached_mask = None\n",
"path": "chaco/array_data_source.py"
}
] | [
{
"content": "\"\"\" Defines the ArrayDataSource class.\"\"\"\n\n# Major library imports\nfrom numpy import array, empty, isfinite, ones, ndarray\nimport numpy as np\n\n# Enthought library imports\nfrom traits.api import Any, Constant, Int, Tuple\n\n# Chaco imports\nfrom .base import NumericalSequenceTrait, reverse_map_1d, SortOrderTrait\nfrom .abstract_data_source import AbstractDataSource\n\n\ndef bounded_nanargmin(arr):\n \"\"\"Find the index of the minimum value, ignoring NaNs.\n\n If all NaNs, return 0.\n \"\"\"\n # Different versions of numpy behave differently in the all-NaN case, so we\n # catch this condition in two different ways.\n try:\n if np.issubdtype(arr.dtype, np.floating):\n min = np.nanargmin(arr)\n elif np.issubdtype(arr.dtype, np.number):\n min = np.argmin(arr)\n else:\n min = 0\n except ValueError:\n return 0\n if isfinite(min):\n return min\n else:\n return 0\n\n\ndef bounded_nanargmax(arr):\n \"\"\"Find the index of the maximum value, ignoring NaNs.\n\n If all NaNs, return -1.\n \"\"\"\n try:\n if np.issubdtype(arr.dtype, np.floating):\n max = np.nanargmax(arr)\n elif np.issubdtype(arr.dtype, np.number):\n max = np.argmax(arr)\n else:\n max = -1\n except ValueError:\n return -1\n if isfinite(max):\n return max\n else:\n return -1\n\n\nclass ArrayDataSource(AbstractDataSource):\n \"\"\"A data source representing a single, continuous array of numerical data.\n\n This class does not listen to the array for value changes; if you need that\n behavior, create a subclass that hooks up the appropriate listeners.\n \"\"\"\n\n # ------------------------------------------------------------------------\n # AbstractDataSource traits\n # ------------------------------------------------------------------------\n\n #: The dimensionality of the indices into this data source (overrides\n #: AbstractDataSource).\n index_dimension = Constant(\"scalar\")\n\n #: The dimensionality of the value at each index point (overrides\n #: AbstractDataSource).\n value_dimension = Constant(\"scalar\")\n\n #: The sort order of the data.\n #: This is a specialized optimization for 1-D arrays, but it's an important\n #: one that's used everywhere.\n sort_order = SortOrderTrait\n\n # ------------------------------------------------------------------------\n # Private traits\n # ------------------------------------------------------------------------\n\n # The data array itself.\n _data = NumericalSequenceTrait\n\n # Cached values of min and max as long as **_data** doesn't change.\n _cached_bounds = Tuple\n\n # Not necessary, since this is not a filter, but provided for convenience.\n _cached_mask = Any\n\n # The index of the (first) minimum value in self._data\n # FIXME: This is an Any instead of an Int trait because of how Traits\n # typechecks numpy.int64 on 64-bit Windows systems.\n _min_index = Any\n\n # The index of the (first) maximum value in self._data\n # FIXME: This is an Any instead of an Int trait because of how Traits\n # typechecks numpy.int64 on 64-bit Windows systems.\n _max_index = Any\n\n # ------------------------------------------------------------------------\n # Public methods\n # ------------------------------------------------------------------------\n\n def __init__(self, data=array([]), sort_order=\"none\", **kw):\n AbstractDataSource.__init__(self, **kw)\n self.set_data(data, sort_order)\n\n def set_data(self, newdata, sort_order=None):\n \"\"\"Sets the data, and optionally the sort order, for this data source.\n\n Parameters\n ----------\n newdata : array\n The data to use.\n 
sort_order : SortOrderTrait\n The sort order of the data\n \"\"\"\n self._data = newdata\n if sort_order is not None:\n self.sort_order = sort_order\n self._compute_bounds()\n self.data_changed = True\n\n def set_mask(self, mask):\n \"\"\"Sets the mask for this data source.\"\"\"\n self._cached_mask = mask\n self.data_changed = True\n\n def remove_mask(self):\n \"\"\"Removes the mask on this data source.\"\"\"\n self._cached_mask = None\n self.data_changed = True\n\n # ------------------------------------------------------------------------\n # AbstractDataSource interface\n # ------------------------------------------------------------------------\n\n def get_data(self):\n \"\"\"Returns the data for this data source, or 0.0 if it has no data.\n\n Implements AbstractDataSource.\n \"\"\"\n if self._data is not None:\n return self._data\n else:\n return empty(shape=(0,))\n\n def get_data_mask(self):\n \"\"\"get_data_mask() -> (data_array, mask_array)\n\n Implements AbstractDataSource.\n \"\"\"\n if self._cached_mask is None:\n if self._data is None:\n return self._data, ones(0, dtype=bool)\n else:\n return self._data, ones(len(self._data), dtype=bool)\n else:\n return self._data, self._cached_mask\n\n def is_masked(self):\n \"\"\"is_masked() -> bool\n\n Implements AbstractDataSource.\n \"\"\"\n if self._cached_mask is not None:\n return True\n else:\n return False\n\n def get_size(self):\n \"\"\"get_size() -> int\n\n Implements AbstractDataSource.\n \"\"\"\n if self._data is not None:\n return len(self._data)\n else:\n return 0\n\n def get_bounds(self):\n \"\"\"Returns the minimum and maximum values of the data source's data.\n\n Implements AbstractDataSource.\n \"\"\"\n if (\n self._cached_bounds is None\n or self._cached_bounds == ()\n or self._cached_bounds == 0.0\n ):\n self._compute_bounds()\n return self._cached_bounds\n\n def reverse_map(self, pt, index=0, outside_returns_none=True):\n \"\"\"Returns the index of *pt* in the data source.\n\n Parameters\n ----------\n pt : scalar value\n value to find\n index\n ignored for data series with 1-D indices\n outside_returns_none : Boolean\n Whether the method returns None if *pt* is outside the range of\n the data source; if False, the method returns the value of the\n bound that *pt* is outside of.\n \"\"\"\n if self.sort_order == \"none\":\n raise NotImplementedError\n\n # index is ignored for dataseries with 1-dimensional indices\n minval, maxval = self._cached_bounds\n if pt < minval:\n if outside_returns_none:\n return None\n else:\n return self._min_index\n elif pt > maxval:\n if outside_returns_none:\n return None\n else:\n return self._max_index\n else:\n return reverse_map_1d(self._data, pt, self.sort_order)\n\n # ------------------------------------------------------------------------\n # Private methods\n # ------------------------------------------------------------------------\n\n def _compute_bounds(self, data=None):\n \"\"\"Computes the minimum and maximum values of self._data.\n\n If a data array is passed in, then that is used instead of self._data.\n This behavior is useful for subclasses.\n \"\"\"\n # TODO: as an optimization, perhaps create and cache a sorted\n # version of the dataset?\n\n if data is None:\n data = self.get_data()\n\n data_len = len(data)\n\n if data_len == 0:\n self._min_index = 0\n self._max_index = 0\n self._cached_bounds = (0.0, 0.0)\n elif data_len == 1:\n self._min_index = 0\n self._max_index = 0\n self._cached_bounds = (data[0], data[0])\n else:\n if self.sort_order == \"ascending\":\n 
self._min_index = 0\n self._max_index = -1\n elif self.sort_order == \"descending\":\n self._min_index = -1\n self._max_index = 0\n else:\n # ignore NaN values. This is probably a little slower,\n # but also much safer.\n\n # data might be an array of strings or objects that\n # can't have argmin calculated on them.\n try:\n # the data may be in a subclass of numpy.array, viewing\n # the data as a ndarray will remove side effects of\n # the subclasses, such as different operator behaviors\n self._min_index = bounded_nanargmin(data.view(ndarray))\n self._max_index = bounded_nanargmax(data.view(ndarray))\n except (TypeError, IndexError, NotImplementedError):\n # For strings and objects, we punt... These show up in\n # label-ish data sources.\n self._cached_bounds = (0.0, 0.0)\n\n self._cached_bounds = (\n data[self._min_index],\n data[self._max_index],\n )\n\n # ------------------------------------------------------------------------\n # Event handlers\n # ------------------------------------------------------------------------\n\n def _metadata_changed(self, event):\n self.metadata_changed = True\n\n def _metadata_items_changed(self, event):\n self.metadata_changed = True\n\n # ------------------------------------------------------------------------\n # Persistence-related methods\n # ------------------------------------------------------------------------\n\n def __getstate__(self):\n state = super().__getstate__()\n if not self.persist_data:\n state.pop(\"_data\", None)\n state.pop(\"_cached_mask\", None)\n state.pop(\"_cached_bounds\", None)\n state.pop(\"_min_index\", None)\n state.pop(\"_max_index\", None)\n return state\n\n def _post_load(self):\n super()._post_load()\n self._cached_bounds = ()\n self._cached_mask = None\n",
"path": "chaco/array_data_source.py"
}
] | diff --git a/chaco/array_data_source.py b/chaco/array_data_source.py
index bda9a8b08..379e6ce82 100644
--- a/chaco/array_data_source.py
+++ b/chaco/array_data_source.py
@@ -155,7 +155,10 @@ def get_data_mask(self):
Implements AbstractDataSource.
"""
if self._cached_mask is None:
- return self._data, ones(len(self._data), dtype=bool)
+ if self._data is None:
+ return self._data, ones(0, dtype=bool)
+ else:
+ return self._data, ones(len(self._data), dtype=bool)
else:
return self._data, self._cached_mask
diff --git a/chaco/tests/test_arraydatasource.py b/chaco/tests/test_arraydatasource.py
index 42fecc730..5160fb572 100644
--- a/chaco/tests/test_arraydatasource.py
+++ b/chaco/tests/test_arraydatasource.py
@@ -99,13 +99,12 @@ def test_get_data_mask(self):
assert_array_equal(data, self.myarray)
assert_array_equal(mask, self.mymask)
- @unittest.skip("get_data_mask() fails in this case")
def test_get_data_mask_no_data(self):
data_source = ArrayDataSource(None)
data, mask = data_source.get_data_mask()
- assert_array_equal(data, 0.0)
- assert_array_equal(mask, True)
+ self.assertEqual(data, None)
+ assert_array_equal(mask, ones(shape=0, dtype=bool))
def test_get_data_mask_no_mask(self):
data, mask = self.data_source.get_data_mask()
| ArrayDataSource get_data_mask() fails when data is None
See this test here:
https://github.com/enthought/chaco/blob/enh/data-source-tests/chaco/tests/arraydatasource_test_case.py#L108
More generally, I think that the behaviour for an empty data source is probably wrong (why a _scalar_ `0.0` instead of `array([])`?) but I'm not sure what will break if that is changed.
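A minimal sketch of the reported failure and of the guarded behaviour the patch above introduces (assumes the pre-patch `ArrayDataSource`; variable names are illustrative):
```python
from numpy import ones
from chaco.array_data_source import ArrayDataSource

ds = ArrayDataSource(None)

try:
    data, mask = ds.get_data_mask()
except TypeError as exc:
    # Pre-patch code calls len(self._data) while _data is None.
    print("fails as reported:", exc)

# The patched get_data_mask() instead returns the data unchanged together
# with a zero-length boolean mask when no data has been set:
data = ds.get_data()                 # empty(shape=(0,)) when _data is None
mask = ones(len(data), dtype=bool)   # empty mask
```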
|
meltano__meltano-7343 | [
{
"content": "\"\"\"Interactive configuration handler.\"\"\"\n\nfrom __future__ import annotations\n\nimport click\nfrom jinja2 import BaseLoader, Environment\nfrom rich.console import Console, Group\nfrom rich.markdown import Markdown\nfrom rich.panel import Panel\nfrom rich.table import Table\nfrom rich.text import Text\n\nfrom meltano.cli.interactive.utils import InteractionStatus\nfrom meltano.cli.utils import CliError\nfrom meltano.core.environment_service import EnvironmentService\nfrom meltano.core.project import Project\nfrom meltano.core.settings_service import (\n REDACTED_VALUE,\n SettingKind,\n SettingsService,\n SettingValueStore,\n)\nfrom meltano.core.settings_store import StoreNotSupportedError\nfrom meltano.core.tracking.contexts import CliEvent\n\nPLUGIN_COLOR = \"magenta\"\nENVIRONMENT_COLOR = \"orange1\"\nSETTING_COLOR = \"blue1\"\nVALUE_COLOR = \"green\"\n\nHOME_SCREEN_TEMPLATE = \"\"\"[bold underline]Configuring [{{ plugin_color }}]{{ plugin_name.capitalize() | safe }}[/{{ plugin_color }}] {% if environment_name %}in Environment[{{ environment_color }}]{{ environment_name }}[/{{ environment_color }}] {% endif %}Interactively[/bold underline]\n\nFollowing the prompts below, you will be guided through configuration of this plugin.\n\nMeltano is responsible for managing the configuration of all of a project’s plugins.\nIt knows what settings are supported by each plugin, and how and when different types of plugins expect to be fed that configuration.\n\nTo determine the values of settings, Meltano will look in 4 main places, with each taking precedence over the next:\n\n 1. Environment variables\n 2. Your meltano.yml project file\n 3. Your project's system database\n 4. The default values set in the plugin's settings metadata\n\nWithin meltano.yml you can also associate configuration with a Meltano Environment, allowing you to define custom layers of configuration within your project.\n\nTo learn more about configuration options, see the [link=https://docs.meltano.com/guide/configuration]Meltano Configuration Guide[/link]\n\n[bold underline]Settings[/bold underline]\n{% for setting in settings %}\n{{ loop.index }}. 
[blue]{{ setting[\"name\"] }}[/blue]: {{ setting[\"description\"] | safe }}\n{%- endfor %}\n\n{% if plugin_url %}To learn more about {{ plugin_name | safe }} and its settings, visit [link={{ plugin_url }}]{{ plugin_url }}[/link]{% endif %}\n\"\"\"\n\n\nclass InteractiveConfig: # noqa: WPS230, WPS214\n \"\"\"Manage Config interactively.\"\"\"\n\n def __init__(self, ctx, store, extras=False, max_width=None):\n \"\"\"Initialise InteractiveConfig instance.\"\"\"\n self.ctx = ctx\n self.store = store\n self.extras = extras\n self.project: Project = self.ctx.obj[\"project\"]\n self.settings: SettingsService = self.ctx.obj[\"settings\"]\n self.session = self.ctx.obj[\"session\"]\n self.tracker = self.ctx.obj[\"tracker\"]\n self.environment_service = EnvironmentService(self.project)\n self.max_width = max_width or 75 # noqa: WPS432\n self.console = Console()\n\n @property\n def configurable_settings(self):\n \"\"\"Return settings available for interactive configuration.\"\"\"\n return self.settings.config_with_metadata(\n session=self.session, extras=self.extras, redacted=True\n )\n\n @property\n def setting_choices(self):\n \"\"\"Return simplified setting choices, for easy printing.\"\"\"\n setting_choices = []\n for index, (name, config_metadata) in enumerate(\n self.configurable_settings.items()\n ):\n description = config_metadata[\"setting\"].description\n description = \"\" if description is None else description\n setting_choices.append((str(index + 1), name, description))\n return setting_choices\n\n def truncate(self, text: str) -> str:\n \"\"\"Truncate text.\"\"\"\n if len(text) >= self.max_width:\n return f\"{text[: self.max_width - 3]}...\"\n return text\n\n def _print_home_screen(self):\n \"\"\"Print screen for this interactive.\"\"\"\n markdown_template = Environment(loader=BaseLoader, autoescape=True).from_string(\n HOME_SCREEN_TEMPLATE\n )\n markdown_text = markdown_template.render(\n {\n \"plugin_color\": PLUGIN_COLOR,\n \"environment_color\": ENVIRONMENT_COLOR,\n \"setting_color\": SETTING_COLOR,\n \"plugin_name\": self.settings.label,\n \"plugin_url\": self.settings.docs_url,\n \"environment_name\": self.project.environment.name\n if self.project.environment\n else None,\n \"settings\": [\n {\n \"name\": name,\n \"description\": self.truncate(description.replace(\"\\n\", \" \")),\n }\n for _, name, description in self.setting_choices\n ],\n }\n )\n self.console.print(Panel(Text.from_markup(markdown_text)))\n\n def _print_setting(self, name, config_metadata, index, last_index):\n \"\"\"Print setting.\"\"\"\n value = config_metadata[\"value\"]\n source = config_metadata[\"source\"]\n setting_def = config_metadata[\"setting\"]\n details = Table(show_header=False)\n details.add_column(\"name\", justify=\"right\")\n details.add_column(\"value\")\n\n pre = [\n Text.from_markup(\n f\"[bold underline][{PLUGIN_COLOR}]{self.settings.label.capitalize()}[/{PLUGIN_COLOR}][/bold underline] Setting {index} of {last_index}\"\n )\n ]\n\n if setting_def.is_extra:\n pre.append(\n Text.from_markup(\n \"[yellow1]Custom Extra: plugin-specific options handled by Meltano[/yellow1]\"\n )\n )\n\n elif setting_def.is_custom:\n pre.append(\n Text.from_markup(\n \"[yellow1]Custom Setting: possibly unsupported by the plugin[/yellow1]\"\n )\n )\n\n details.add_row(\n Text(\"Name\"), Text.from_markup(f\"[{SETTING_COLOR}]{name}[/{SETTING_COLOR}]\")\n )\n\n if source is SettingValueStore.DEFAULT:\n label = \"default\"\n elif source is SettingValueStore.INHERITED:\n label = f\"inherited from 
'{self.settings.plugin.parent.name}'\"\n else:\n label = f\"from {source.label}\"\n expanded_value = value if value is not None else \"(empty string)\"\n unexpanded_value = config_metadata.get(\"unexpanded_value\")\n if unexpanded_value:\n current_value = (\n unexpanded_value if unexpanded_value is not None else \"(empty string)\"\n )\n\n details.add_row(Text(\"Current Expanded Value\"), Text(f\"{expanded_value}\"))\n else:\n current_value = value if value is not None else \"(empty string)\"\n details.add_row(\n Text(f\"Current Value ({label})\"),\n Text.from_markup(f\"[{VALUE_COLOR}]{current_value}[/{VALUE_COLOR}]\"),\n )\n\n if setting_def.kind:\n details.add_row(Text(\"Kind\"), Text(f\"{setting_def.kind}\"))\n if source is not SettingValueStore.DEFAULT:\n default_value = setting_def.value\n if default_value is not None:\n details.add_row(Text(\"Default\"), Text(f\"{default_value!r}\"))\n env_keys = [\n var.definition for var in self.settings.setting_env_vars(setting_def)\n ]\n\n details.add_row(Text(\"Env(s)\"), Text(f\"{', '.join(env_keys)}\"))\n post = []\n if setting_def.description:\n post.append(\n Group(\n Text(\" Description:\"),\n Panel(Markdown(setting_def.description, justify=\"left\")),\n )\n )\n\n docs_url = self.settings.docs_url\n if docs_url:\n post.append(\n Text.from_markup(\n f\" To learn more about {self.settings.label} and its settings, visit [link={docs_url}]{docs_url}[/link]\"\n )\n )\n\n self.console.print(Panel(Group(*pre, details, *post)))\n\n @staticmethod\n def _value_prompt(config_metadata):\n if config_metadata[\"setting\"].kind != SettingKind.OPTIONS:\n return (\n click.prompt(\n \"New value\",\n default=\"\",\n show_default=False,\n hide_input=True,\n confirmation_prompt=True,\n )\n if config_metadata[\"setting\"].is_redacted\n else click.prompt(\"New value\", default=\"\", show_default=False)\n )\n\n options_index = {\n str(index + 1): value\n for index, value in enumerate(\n (chs[\"label\"], chs[\"value\"])\n for chs in config_metadata[\"setting\"].options\n )\n }\n\n click.echo()\n for index, value in options_index.items():\n click.echo(f\"{index}. 
{value[0]}\")\n click.echo()\n chosen_index = click.prompt(\n \"Select value\",\n type=click.Choice(list(options_index.keys())),\n show_default=False,\n )\n return options_index[chosen_index][1]\n\n def configure(self, name, index=None, last_index=None, show_set_prompt=True):\n \"\"\"Configure a single setting interactively.\"\"\"\n config_metadata = next(\n (\n config_metadata\n for nme, config_metadata in self.configurable_settings.items()\n if nme == name\n )\n )\n self._print_setting(\n name=name,\n config_metadata=config_metadata,\n index=index,\n last_index=last_index,\n )\n\n action = \"y\"\n if show_set_prompt:\n try:\n click.echo()\n action = click.prompt(\n \"Set this value (Y/n) or exit (e)?\",\n default=\"y\",\n type=click.Choice([\"y\", \"n\", \"e\"], case_sensitive=False),\n )\n except click.Abort:\n action = \"e\"\n\n if action.lower() == \"y\":\n while True:\n click.echo()\n try:\n new_value = self._value_prompt(config_metadata)\n except click.Abort:\n click.echo()\n click.echo(\"Skipping...\")\n click.pause()\n return InteractionStatus.SKIP\n\n try:\n click.echo()\n self.set_value(\n setting_name=tuple(name.split(\".\")),\n value=new_value,\n store=self.store,\n interactive=True,\n )\n click.echo()\n click.pause()\n return InteractionStatus.SKIP\n except Exception as e:\n self.tracker.track_command_event(CliEvent.inflight)\n click.secho(f\"Failed to set value: {e}\", fg=\"red\")\n\n elif action.lower() == \"n\":\n return InteractionStatus.SKIP\n\n elif action.lower() == \"e\":\n return InteractionStatus.EXIT\n\n def configure_all(self):\n \"\"\"Configure all settings.\"\"\"\n numeric_choices = [idx for idx, _, _ in self.setting_choices]\n if not numeric_choices:\n click.secho(\n \"There are no settings to configure. \"\n \"For help, please see https://melta.no#no-plugin-settings-defined\",\n fg=\"yellow\",\n )\n self.tracker.track_command_event(CliEvent.completed)\n return\n\n while True:\n click.clear()\n self._print_home_screen()\n choices = [\"all\", *numeric_choices, \"e\"]\n\n branch = \"all\"\n try:\n click.echo()\n branch = click.prompt(\n \"Loop through all settings (all), select a setting by \"\n f\"number ({min(int(chs) for chs in numeric_choices)} - \"\n f\"{max(int(chs) for chs in numeric_choices)}), or exit (e)?\",\n type=click.Choice(choices, case_sensitive=False),\n default=\"all\",\n show_choices=False,\n )\n except click.Abort:\n click.echo()\n branch = \"e\"\n\n if branch == \"all\":\n for index, name, _ in self.setting_choices:\n click.clear()\n status = InteractionStatus.START\n while status not in {\n InteractionStatus.SKIP,\n InteractionStatus.EXIT,\n }:\n status = self.configure(\n name=name,\n index=index,\n last_index=len(self.setting_choices),\n )\n if status == InteractionStatus.EXIT:\n break\n elif branch.lower() == \"e\":\n self.tracker.track_command_event(CliEvent.completed)\n click.echo()\n return\n else:\n choice_name = next(\n nme for idx, nme, _ in self.setting_choices if idx == branch\n )\n click.clear()\n status = self.configure(\n name=choice_name,\n index=branch,\n last_index=len(self.setting_choices),\n show_set_prompt=False,\n )\n\n def set_value(self, setting_name, value, store, interactive=False):\n \"\"\"Set value helper function.\"\"\"\n settings = self.settings\n path = list(setting_name)\n try:\n value, metadata = settings.set_with_metadata(\n path, value, store=store, session=self.session\n )\n except StoreNotSupportedError as err:\n if interactive:\n self.tracker.track_command_event(CliEvent.inflight)\n else:\n 
self.tracker.track_command_event(CliEvent.aborted)\n raise CliError(\n f\"{settings.label.capitalize()} setting '{path}' could not be set in {store.label}: {err}\"\n ) from err\n\n name = metadata[\"name\"]\n store = metadata[\"store\"]\n is_redacted = metadata[\"setting\"] and metadata[\"setting\"].is_redacted\n if is_redacted:\n value = REDACTED_VALUE\n click.secho(\n f\"{settings.label.capitalize()} setting '{name}' was set in {store.label}: {value!r}\",\n fg=VALUE_COLOR,\n )\n\n current_value, source = settings.get_with_source(name, session=self.session)\n if source != store:\n if is_redacted:\n current_value = REDACTED_VALUE\n click.secho(\n f\"Current value is still: {current_value!r} (from {source.label})\",\n fg=\"yellow\",\n )\n\n if interactive:\n self.tracker.track_command_event(CliEvent.inflight)\n else:\n self.tracker.track_command_event(CliEvent.completed)\n",
"path": "src/meltano/cli/interactive/config.py"
}
] | [
{
"content": "\"\"\"Interactive configuration handler.\"\"\"\n\nfrom __future__ import annotations\n\nfrom contextlib import suppress\n\n# NOTE: Importing the readline module enables the use of arrow\n# keys for text navigation during interactive config.\n# Refer to https://docs.python.org/3/library/readline.html\nwith suppress(ImportError):\n import readline # noqa: F401\n\nimport click\nfrom jinja2 import BaseLoader, Environment\nfrom rich.console import Console, Group\nfrom rich.markdown import Markdown\nfrom rich.panel import Panel\nfrom rich.table import Table\nfrom rich.text import Text\n\nfrom meltano.cli.interactive.utils import InteractionStatus\nfrom meltano.cli.utils import CliError\nfrom meltano.core.environment_service import EnvironmentService\nfrom meltano.core.project import Project\nfrom meltano.core.settings_service import (\n REDACTED_VALUE,\n SettingKind,\n SettingsService,\n SettingValueStore,\n)\nfrom meltano.core.settings_store import StoreNotSupportedError\nfrom meltano.core.tracking.contexts import CliEvent\n\nPLUGIN_COLOR = \"magenta\"\nENVIRONMENT_COLOR = \"orange1\"\nSETTING_COLOR = \"blue1\"\nVALUE_COLOR = \"green\"\n\nHOME_SCREEN_TEMPLATE = \"\"\"[bold underline]Configuring [{{ plugin_color }}]{{ plugin_name.capitalize() | safe }}[/{{ plugin_color }}] {% if environment_name %}in Environment[{{ environment_color }}]{{ environment_name }}[/{{ environment_color }}] {% endif %}Interactively[/bold underline]\n\nFollowing the prompts below, you will be guided through configuration of this plugin.\n\nMeltano is responsible for managing the configuration of all of a project’s plugins.\nIt knows what settings are supported by each plugin, and how and when different types of plugins expect to be fed that configuration.\n\nTo determine the values of settings, Meltano will look in 4 main places, with each taking precedence over the next:\n\n 1. Environment variables\n 2. Your meltano.yml project file\n 3. Your project's system database\n 4. The default values set in the plugin's settings metadata\n\nWithin meltano.yml you can also associate configuration with a Meltano Environment, allowing you to define custom layers of configuration within your project.\n\nTo learn more about configuration options, see the [link=https://docs.meltano.com/guide/configuration]Meltano Configuration Guide[/link]\n\n[bold underline]Settings[/bold underline]\n{% for setting in settings %}\n{{ loop.index }}. 
[blue]{{ setting[\"name\"] }}[/blue]: {{ setting[\"description\"] | safe }}\n{%- endfor %}\n\n{% if plugin_url %}To learn more about {{ plugin_name | safe }} and its settings, visit [link={{ plugin_url }}]{{ plugin_url }}[/link]{% endif %}\n\"\"\"\n\n\nclass InteractiveConfig: # noqa: WPS230, WPS214\n \"\"\"Manage Config interactively.\"\"\"\n\n def __init__(self, ctx, store, extras=False, max_width=None):\n \"\"\"Initialise InteractiveConfig instance.\"\"\"\n self.ctx = ctx\n self.store = store\n self.extras = extras\n self.project: Project = self.ctx.obj[\"project\"]\n self.settings: SettingsService = self.ctx.obj[\"settings\"]\n self.session = self.ctx.obj[\"session\"]\n self.tracker = self.ctx.obj[\"tracker\"]\n self.environment_service = EnvironmentService(self.project)\n self.max_width = max_width or 75 # noqa: WPS432\n self.console = Console()\n\n @property\n def configurable_settings(self):\n \"\"\"Return settings available for interactive configuration.\"\"\"\n return self.settings.config_with_metadata(\n session=self.session, extras=self.extras, redacted=True\n )\n\n @property\n def setting_choices(self):\n \"\"\"Return simplified setting choices, for easy printing.\"\"\"\n setting_choices = []\n for index, (name, config_metadata) in enumerate(\n self.configurable_settings.items()\n ):\n description = config_metadata[\"setting\"].description\n description = \"\" if description is None else description\n setting_choices.append((str(index + 1), name, description))\n return setting_choices\n\n def truncate(self, text: str) -> str:\n \"\"\"Truncate text.\"\"\"\n if len(text) >= self.max_width:\n return f\"{text[: self.max_width - 3]}...\"\n return text\n\n def _print_home_screen(self):\n \"\"\"Print screen for this interactive.\"\"\"\n markdown_template = Environment(loader=BaseLoader, autoescape=True).from_string(\n HOME_SCREEN_TEMPLATE\n )\n markdown_text = markdown_template.render(\n {\n \"plugin_color\": PLUGIN_COLOR,\n \"environment_color\": ENVIRONMENT_COLOR,\n \"setting_color\": SETTING_COLOR,\n \"plugin_name\": self.settings.label,\n \"plugin_url\": self.settings.docs_url,\n \"environment_name\": self.project.environment.name\n if self.project.environment\n else None,\n \"settings\": [\n {\n \"name\": name,\n \"description\": self.truncate(description.replace(\"\\n\", \" \")),\n }\n for _, name, description in self.setting_choices\n ],\n }\n )\n self.console.print(Panel(Text.from_markup(markdown_text)))\n\n def _print_setting(self, name, config_metadata, index, last_index):\n \"\"\"Print setting.\"\"\"\n value = config_metadata[\"value\"]\n source = config_metadata[\"source\"]\n setting_def = config_metadata[\"setting\"]\n details = Table(show_header=False)\n details.add_column(\"name\", justify=\"right\")\n details.add_column(\"value\")\n\n pre = [\n Text.from_markup(\n f\"[bold underline][{PLUGIN_COLOR}]{self.settings.label.capitalize()}[/{PLUGIN_COLOR}][/bold underline] Setting {index} of {last_index}\"\n )\n ]\n\n if setting_def.is_extra:\n pre.append(\n Text.from_markup(\n \"[yellow1]Custom Extra: plugin-specific options handled by Meltano[/yellow1]\"\n )\n )\n\n elif setting_def.is_custom:\n pre.append(\n Text.from_markup(\n \"[yellow1]Custom Setting: possibly unsupported by the plugin[/yellow1]\"\n )\n )\n\n details.add_row(\n Text(\"Name\"), Text.from_markup(f\"[{SETTING_COLOR}]{name}[/{SETTING_COLOR}]\")\n )\n\n if source is SettingValueStore.DEFAULT:\n label = \"default\"\n elif source is SettingValueStore.INHERITED:\n label = f\"inherited from 
'{self.settings.plugin.parent.name}'\"\n else:\n label = f\"from {source.label}\"\n expanded_value = value if value is not None else \"(empty string)\"\n unexpanded_value = config_metadata.get(\"unexpanded_value\")\n if unexpanded_value:\n current_value = (\n unexpanded_value if unexpanded_value is not None else \"(empty string)\"\n )\n\n details.add_row(Text(\"Current Expanded Value\"), Text(f\"{expanded_value}\"))\n else:\n current_value = value if value is not None else \"(empty string)\"\n details.add_row(\n Text(f\"Current Value ({label})\"),\n Text.from_markup(f\"[{VALUE_COLOR}]{current_value}[/{VALUE_COLOR}]\"),\n )\n\n if setting_def.kind:\n details.add_row(Text(\"Kind\"), Text(f\"{setting_def.kind}\"))\n if source is not SettingValueStore.DEFAULT:\n default_value = setting_def.value\n if default_value is not None:\n details.add_row(Text(\"Default\"), Text(f\"{default_value!r}\"))\n env_keys = [\n var.definition for var in self.settings.setting_env_vars(setting_def)\n ]\n\n details.add_row(Text(\"Env(s)\"), Text(f\"{', '.join(env_keys)}\"))\n post = []\n if setting_def.description:\n post.append(\n Group(\n Text(\" Description:\"),\n Panel(Markdown(setting_def.description, justify=\"left\")),\n )\n )\n\n docs_url = self.settings.docs_url\n if docs_url:\n post.append(\n Text.from_markup(\n f\" To learn more about {self.settings.label} and its settings, visit [link={docs_url}]{docs_url}[/link]\"\n )\n )\n\n self.console.print(Panel(Group(*pre, details, *post)))\n\n @staticmethod\n def _value_prompt(config_metadata):\n if config_metadata[\"setting\"].kind != SettingKind.OPTIONS:\n return (\n click.prompt(\n \"New value\",\n default=\"\",\n show_default=False,\n hide_input=True,\n confirmation_prompt=True,\n )\n if config_metadata[\"setting\"].is_redacted\n else click.prompt(\"New value\", default=\"\", show_default=False)\n )\n\n options_index = {\n str(index + 1): value\n for index, value in enumerate(\n (chs[\"label\"], chs[\"value\"])\n for chs in config_metadata[\"setting\"].options\n )\n }\n\n click.echo()\n for index, value in options_index.items():\n click.echo(f\"{index}. 
{value[0]}\")\n click.echo()\n chosen_index = click.prompt(\n \"Select value\",\n type=click.Choice(list(options_index.keys())),\n show_default=False,\n )\n return options_index[chosen_index][1]\n\n def configure(self, name, index=None, last_index=None, show_set_prompt=True):\n \"\"\"Configure a single setting interactively.\"\"\"\n config_metadata = next(\n (\n config_metadata\n for nme, config_metadata in self.configurable_settings.items()\n if nme == name\n )\n )\n self._print_setting(\n name=name,\n config_metadata=config_metadata,\n index=index,\n last_index=last_index,\n )\n\n action = \"y\"\n if show_set_prompt:\n try:\n click.echo()\n action = click.prompt(\n \"Set this value (Y/n) or exit (e)?\",\n default=\"y\",\n type=click.Choice([\"y\", \"n\", \"e\"], case_sensitive=False),\n )\n except click.Abort:\n action = \"e\"\n\n if action.lower() == \"y\":\n while True:\n click.echo()\n try:\n new_value = self._value_prompt(config_metadata)\n except click.Abort:\n click.echo()\n click.echo(\"Skipping...\")\n click.pause()\n return InteractionStatus.SKIP\n\n try:\n click.echo()\n self.set_value(\n setting_name=tuple(name.split(\".\")),\n value=new_value,\n store=self.store,\n interactive=True,\n )\n click.echo()\n click.pause()\n return InteractionStatus.SKIP\n except Exception as e:\n self.tracker.track_command_event(CliEvent.inflight)\n click.secho(f\"Failed to set value: {e}\", fg=\"red\")\n\n elif action.lower() == \"n\":\n return InteractionStatus.SKIP\n\n elif action.lower() == \"e\":\n return InteractionStatus.EXIT\n\n def configure_all(self):\n \"\"\"Configure all settings.\"\"\"\n numeric_choices = [idx for idx, _, _ in self.setting_choices]\n if not numeric_choices:\n click.secho(\n \"There are no settings to configure. \"\n \"For help, please see https://melta.no#no-plugin-settings-defined\",\n fg=\"yellow\",\n )\n self.tracker.track_command_event(CliEvent.completed)\n return\n\n while True:\n click.clear()\n self._print_home_screen()\n choices = [\"all\", *numeric_choices, \"e\"]\n\n branch = \"all\"\n try:\n click.echo()\n branch = click.prompt(\n \"Loop through all settings (all), select a setting by \"\n f\"number ({min(int(chs) for chs in numeric_choices)} - \"\n f\"{max(int(chs) for chs in numeric_choices)}), or exit (e)?\",\n type=click.Choice(choices, case_sensitive=False),\n default=\"all\",\n show_choices=False,\n )\n except click.Abort:\n click.echo()\n branch = \"e\"\n\n if branch == \"all\":\n for index, name, _ in self.setting_choices:\n click.clear()\n status = InteractionStatus.START\n while status not in {\n InteractionStatus.SKIP,\n InteractionStatus.EXIT,\n }:\n status = self.configure(\n name=name,\n index=index,\n last_index=len(self.setting_choices),\n )\n if status == InteractionStatus.EXIT:\n break\n elif branch.lower() == \"e\":\n self.tracker.track_command_event(CliEvent.completed)\n click.echo()\n return\n else:\n choice_name = next(\n nme for idx, nme, _ in self.setting_choices if idx == branch\n )\n click.clear()\n status = self.configure(\n name=choice_name,\n index=branch,\n last_index=len(self.setting_choices),\n show_set_prompt=False,\n )\n\n def set_value(self, setting_name, value, store, interactive=False):\n \"\"\"Set value helper function.\"\"\"\n settings = self.settings\n path = list(setting_name)\n try:\n value, metadata = settings.set_with_metadata(\n path, value, store=store, session=self.session\n )\n except StoreNotSupportedError as err:\n if interactive:\n self.tracker.track_command_event(CliEvent.inflight)\n else:\n 
self.tracker.track_command_event(CliEvent.aborted)\n raise CliError(\n f\"{settings.label.capitalize()} setting '{path}' could not be set in {store.label}: {err}\"\n ) from err\n\n name = metadata[\"name\"]\n store = metadata[\"store\"]\n is_redacted = metadata[\"setting\"] and metadata[\"setting\"].is_redacted\n if is_redacted:\n value = REDACTED_VALUE\n click.secho(\n f\"{settings.label.capitalize()} setting '{name}' was set in {store.label}: {value!r}\",\n fg=VALUE_COLOR,\n )\n\n current_value, source = settings.get_with_source(name, session=self.session)\n if source != store:\n if is_redacted:\n current_value = REDACTED_VALUE\n click.secho(\n f\"Current value is still: {current_value!r} (from {source.label})\",\n fg=\"yellow\",\n )\n\n if interactive:\n self.tracker.track_command_event(CliEvent.inflight)\n else:\n self.tracker.track_command_event(CliEvent.completed)\n",
"path": "src/meltano/cli/interactive/config.py"
}
] | diff --git a/src/meltano/cli/interactive/config.py b/src/meltano/cli/interactive/config.py
index 674aae02f7..705c79fa82 100644
--- a/src/meltano/cli/interactive/config.py
+++ b/src/meltano/cli/interactive/config.py
@@ -2,6 +2,14 @@
 
 from __future__ import annotations
 
+from contextlib import suppress
+
+# NOTE: Importing the readline module enables the use of arrow
+# keys for text navigation during interactive config.
+# Refer to https://docs.python.org/3/library/readline.html
+with suppress(ImportError):
+    import readline  # noqa: F401
+
 import click
 from jinja2 import BaseLoader, Environment
 from rich.console import Console, Group
| feature: Support arrow key text navigation during interactive config
### Feature scope
CLI (options, error messages, logging, etc.)
### Description
Currently, when using interactive config, arrow key presses are inserted into the prompt as raw escape sequences rather than being interpreted as navigation controls.
Examples:
Pressing the up key to try to recall the last value entered:
```
New value: ^[[A
```
Pressing the left key repeatedly to try to add a missing quote:
```
New value: example"^[[D^[[D^[[D^[[D^[[D^[[D^[[D^[[D
```
Ideally, arrow keys pressed during interactive config would result in typical text navigation behaviour.
|
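The fix above leans on a documented side effect: importing `readline` enables line editing and history for Python's `input()` on terminals that support it, and `click.prompt` uses `input()` for visible prompts. A minimal standalone sketch of the same pattern follows; it is not taken from the Meltano codebase, and the prompt text is only illustrative.

```python
from contextlib import suppress

# Optional import: on platforms without readline the prompt still works,
# just without arrow-key editing or history.
with suppress(ImportError):
    import readline  # noqa: F401  # imported only for its side effect

import click

if __name__ == "__main__":
    # With readline loaded, Up recalls earlier input and Left/Right move the
    # cursor instead of echoing raw sequences such as "^[[A" or "^[[D".
    value = click.prompt("New value", default="", show_default=False)
    click.echo(f"Entered: {value!r}")
```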
nipy__nipype-3634 | [
{
"content": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"PETPVC is a toolbox for partial volume correction in positron emission tomography.\"\"\"\nimport os\n\nfrom .base import (\n TraitedSpec,\n CommandLineInputSpec,\n CommandLine,\n File,\n isdefined,\n traits,\n)\nfrom ..utils.filemanip import fname_presuffix\nfrom ..external.due import BibTeX\n\npvc_methods = [\n \"GTM\",\n \"IY\",\n \"IY+RL\",\n \"IY+VC\",\n \"LABBE\",\n \"LABBE+MTC\",\n \"LABBE+MTC+RL\",\n \"LABBE+MTC+VC\",\n \"LABBE+RBV\",\n \"LABBE+RBV+RL\",\n \"LABBE+RBV+VC\",\n \"MG\",\n \"MG+RL\",\n \"MG+VC\",\n \"MTC\",\n \"MTC+RL\",\n \"MTC+VC\",\n \"RBV\",\n \"RBV+RL\",\n \"RBV+VC\",\n \"RL\",\n \"VC\",\n]\n\n\nclass PETPVCInputSpec(CommandLineInputSpec):\n in_file = File(desc=\"PET image file\", exists=True, mandatory=True, argstr=\"-i %s\")\n out_file = File(desc=\"Output file\", genfile=True, hash_files=False, argstr=\"-o %s\")\n mask_file = File(\n desc=\"Mask image file\", exists=True, mandatory=True, argstr=\"-m %s\"\n )\n pvc = traits.Enum(\n pvc_methods,\n mandatory=True,\n argstr=\"-p %s\",\n desc=\"\"\"\\\nDesired PVC method:\n\n * Geometric transfer matrix -- ``GTM``\n * Labbe approach -- ``LABBE``\n * Richardson-Lucy -- ``RL``\n * Van-Cittert -- ``VC``\n * Region-based voxel-wise correction -- ``RBV``\n * RBV with Labbe -- ``LABBE+RBV``\n * RBV with Van-Cittert -- ``RBV+VC``\n * RBV with Richardson-Lucy -- ``RBV+RL``\n * RBV with Labbe and Van-Cittert -- ``LABBE+RBV+VC``\n * RBV with Labbe and Richardson-Lucy -- ``LABBE+RBV+RL``\n * Multi-target correction -- ``MTC``\n * MTC with Labbe -- ``LABBE+MTC``\n * MTC with Van-Cittert -- ``MTC+VC``\n * MTC with Richardson-Lucy -- ``MTC+RL``\n * MTC with Labbe and Van-Cittert -- ``LABBE+MTC+VC``\n * MTC with Labbe and Richardson-Lucy -- ``LABBE+MTC+RL``\n * Iterative Yang -- ``IY``\n * Iterative Yang with Van-Cittert -- ``IY+VC``\n * Iterative Yang with Richardson-Lucy -- ``IY+RL``\n * Muller Gartner -- ``MG``\n * Muller Gartner with Van-Cittert -- ``MG+VC``\n * Muller Gartner with Richardson-Lucy -- ``MG+RL``\n\n\"\"\",\n )\n fwhm_x = traits.Float(\n desc=\"The full-width at half maximum in mm along x-axis\",\n mandatory=True,\n argstr=\"-x %.4f\",\n )\n fwhm_y = traits.Float(\n desc=\"The full-width at half maximum in mm along y-axis\",\n mandatory=True,\n argstr=\"-y %.4f\",\n )\n fwhm_z = traits.Float(\n desc=\"The full-width at half maximum in mm along z-axis\",\n mandatory=True,\n argstr=\"-z %.4f\",\n )\n debug = traits.Bool(\n desc=\"Prints debug information\",\n usedefault=True,\n default_value=False,\n argstr=\"-d\",\n )\n n_iter = traits.Int(\n desc=\"Number of iterations\", default_value=10, usedefault=True, argstr=\"-n %d\"\n )\n n_deconv = traits.Int(\n desc=\"Number of deconvolution iterations\",\n default_value=10,\n usedefault=True,\n argstr=\"-k %d\",\n )\n alpha = traits.Float(\n desc=\"Alpha value\", default_value=1.5, usedefault=True, argstr=\"-a %.4f\"\n )\n stop_crit = traits.Float(\n desc=\"Stopping criterion\", default_value=0.01, usedefault=True, argstr=\"-s %.4f\"\n )\n\n\nclass PETPVCOutputSpec(TraitedSpec):\n out_file = File(desc=\"Output file\")\n\n\nclass PETPVC(CommandLine):\n \"\"\"Use PETPVC for partial volume correction of PET images.\n\n PETPVC ([1]_, [2]_) is a software from the Nuclear Medicine Department\n of the UCL University Hospital, London, UK.\n\n Examples\n --------\n >>> from ..testing import example_data\n >>> #TODO get data for PETPVC\n >>> pvc = 
PETPVC()\n >>> pvc.inputs.in_file = 'pet.nii.gz'\n >>> pvc.inputs.mask_file = 'tissues.nii.gz'\n >>> pvc.inputs.out_file = 'pet_pvc_rbv.nii.gz'\n >>> pvc.inputs.pvc = 'RBV'\n >>> pvc.inputs.fwhm_x = 2.0\n >>> pvc.inputs.fwhm_y = 2.0\n >>> pvc.inputs.fwhm_z = 2.0\n >>> outs = pvc.run() #doctest: +SKIP\n\n References\n ----------\n .. [1] K. Erlandsson, I. Buvat, P. H. Pretorius, B. A. Thomas, and B. F. Hutton,\n \"A review of partial volume correction techniques for emission tomography\n and their applications in neurology, cardiology and oncology,\" Phys. Med.\n Biol., vol. 57, no. 21, p. R119, 2012.\n .. [2] https://github.com/UCL/PETPVC\n\n \"\"\"\n\n input_spec = PETPVCInputSpec\n output_spec = PETPVCOutputSpec\n _cmd = \"petpvc\"\n\n _references = [\n {\n \"entry\": BibTeX(\n \"@article{0031-9155-61-22-7975,\"\n \"author={Benjamin A Thomas and Vesna Cuplov and Alexandre Bousse and \"\n \"Adriana Mendes and Kris Thielemans and Brian F Hutton and Kjell Erlandsson},\"\n \"title={PETPVC: a toolbox for performing partial volume correction \"\n \"techniques in positron emission tomography},\"\n \"journal={Physics in Medicine and Biology},\"\n \"volume={61},\"\n \"number={22},\"\n \"pages={7975},\"\n \"url={http://stacks.iop.org/0031-9155/61/i=22/a=7975},\"\n \"doi={https://doi.org/10.1088/0031-9155/61/22/7975},\"\n \"year={2016},\"\n \"}\"\n ),\n \"description\": \"PETPVC software implementation publication\",\n \"tags\": [\"implementation\"],\n }\n ]\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs[\"out_file\"] = self.inputs.out_file\n if not isdefined(outputs[\"out_file\"]):\n method_name = self.inputs.pvc.lower()\n outputs[\"out_file\"] = self._gen_fname(\n self.inputs.in_file, suffix=f\"_{method_name}_pvc\"\n )\n\n outputs[\"out_file\"] = os.path.abspath(outputs[\"out_file\"])\n return outputs\n\n def _gen_fname(\n self, basename, cwd=None, suffix=None, change_ext=True, ext=\".nii.gz\"\n ):\n \"\"\"Generate a filename based on the given parameters.\n\n The filename will take the form: cwd/basename<suffix><ext>.\n If change_ext is True, it will use the extensions specified in\n <instance>inputs.output_type.\n\n Parameters\n ----------\n basename : str\n Filename to base the new filename on.\n cwd : str\n Path to prefix to the new filename. (default is os.getcwd())\n suffix : str\n Suffix to add to the `basename`. (defaults is '' )\n change_ext : bool\n Flag to change the filename extension to the given `ext`.\n (Default is False)\n\n Returns\n -------\n fname : str\n New filename based on given parameters.\n\n \"\"\"\n if basename == \"\":\n msg = \"Unable to generate filename for command %s. \" % self.cmd\n msg += \"basename is not set!\"\n raise ValueError(msg)\n if cwd is None:\n cwd = os.getcwd()\n if change_ext:\n if suffix:\n suffix = \"\".join((suffix, ext))\n else:\n suffix = ext\n if suffix is None:\n suffix = \"\"\n fname = fname_presuffix(basename, suffix=suffix, use_ext=False, newpath=cwd)\n return fname\n\n def _gen_filename(self, name):\n if name == \"out_file\":\n return self._list_outputs()[\"out_file\"]\n return None\n",
"path": "nipype/interfaces/petpvc.py"
}
] | [
{
"content": "# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"PETPVC is a toolbox for partial volume correction in positron emission tomography.\"\"\"\nimport os\n\nfrom .base import (\n TraitedSpec,\n CommandLineInputSpec,\n CommandLine,\n File,\n isdefined,\n traits,\n)\nfrom ..utils.filemanip import fname_presuffix\nfrom ..external.due import BibTeX\n\npvc_methods = [\n \"GTM\",\n \"IY\",\n \"IY+RL\",\n \"IY+VC\",\n \"LABBE\",\n \"LABBE+MTC\",\n \"LABBE+MTC+RL\",\n \"LABBE+MTC+VC\",\n \"LABBE+RBV\",\n \"LABBE+RBV+RL\",\n \"LABBE+RBV+VC\",\n \"MG\",\n \"MG+RL\",\n \"MG+VC\",\n \"MTC\",\n \"MTC+RL\",\n \"MTC+VC\",\n \"RBV\",\n \"RBV+RL\",\n \"RBV+VC\",\n \"RL\",\n \"VC\",\n \"STC\",\n]\n\n\nclass PETPVCInputSpec(CommandLineInputSpec):\n in_file = File(desc=\"PET image file\", exists=True, mandatory=True, argstr=\"-i %s\")\n out_file = File(desc=\"Output file\", genfile=True, hash_files=False, argstr=\"-o %s\")\n mask_file = File(\n desc=\"Mask image file\", exists=True, mandatory=True, argstr=\"-m %s\"\n )\n pvc = traits.Enum(\n pvc_methods,\n mandatory=True,\n argstr=\"-p %s\",\n desc=\"\"\"\\\nDesired PVC method:\n\n * Geometric transfer matrix -- ``GTM``\n * Labbe approach -- ``LABBE``\n * Richardson-Lucy -- ``RL``\n * Van-Cittert -- ``VC``\n * Region-based voxel-wise correction -- ``RBV``\n * RBV with Labbe -- ``LABBE+RBV``\n * RBV with Van-Cittert -- ``RBV+VC``\n * RBV with Richardson-Lucy -- ``RBV+RL``\n * RBV with Labbe and Van-Cittert -- ``LABBE+RBV+VC``\n * RBV with Labbe and Richardson-Lucy -- ``LABBE+RBV+RL``\n * Multi-target correction -- ``MTC``\n * MTC with Labbe -- ``LABBE+MTC``\n * MTC with Van-Cittert -- ``MTC+VC``\n * MTC with Richardson-Lucy -- ``MTC+RL``\n * MTC with Labbe and Van-Cittert -- ``LABBE+MTC+VC``\n * MTC with Labbe and Richardson-Lucy -- ``LABBE+MTC+RL``\n * Iterative Yang -- ``IY``\n * Iterative Yang with Van-Cittert -- ``IY+VC``\n * Iterative Yang with Richardson-Lucy -- ``IY+RL``\n * Muller Gartner -- ``MG``\n * Muller Gartner with Van-Cittert -- ``MG+VC``\n * Muller Gartner with Richardson-Lucy -- ``MG+RL``\n * Single-target correction -- ``STC``\n\n\"\"\",\n )\n fwhm_x = traits.Float(\n desc=\"The full-width at half maximum in mm along x-axis\",\n mandatory=True,\n argstr=\"-x %.4f\",\n )\n fwhm_y = traits.Float(\n desc=\"The full-width at half maximum in mm along y-axis\",\n mandatory=True,\n argstr=\"-y %.4f\",\n )\n fwhm_z = traits.Float(\n desc=\"The full-width at half maximum in mm along z-axis\",\n mandatory=True,\n argstr=\"-z %.4f\",\n )\n debug = traits.Bool(\n desc=\"Prints debug information\",\n usedefault=True,\n default_value=False,\n argstr=\"-d\",\n )\n n_iter = traits.Int(\n desc=\"Number of iterations\", default_value=10, usedefault=True, argstr=\"-n %d\"\n )\n n_deconv = traits.Int(\n desc=\"Number of deconvolution iterations\",\n default_value=10,\n usedefault=True,\n argstr=\"-k %d\",\n )\n alpha = traits.Float(\n desc=\"Alpha value\", default_value=1.5, usedefault=True, argstr=\"-a %.4f\"\n )\n stop_crit = traits.Float(\n desc=\"Stopping criterion\", default_value=0.01, usedefault=True, argstr=\"-s %.4f\"\n )\n\n\nclass PETPVCOutputSpec(TraitedSpec):\n out_file = File(desc=\"Output file\")\n\n\nclass PETPVC(CommandLine):\n \"\"\"Use PETPVC for partial volume correction of PET images.\n\n PETPVC ([1]_, [2]_) is a software from the Nuclear Medicine Department\n of the UCL University Hospital, London, UK.\n\n Examples\n --------\n >>> from ..testing import 
example_data\n >>> #TODO get data for PETPVC\n >>> pvc = PETPVC()\n >>> pvc.inputs.in_file = 'pet.nii.gz'\n >>> pvc.inputs.mask_file = 'tissues.nii.gz'\n >>> pvc.inputs.out_file = 'pet_pvc_rbv.nii.gz'\n >>> pvc.inputs.pvc = 'RBV'\n >>> pvc.inputs.fwhm_x = 2.0\n >>> pvc.inputs.fwhm_y = 2.0\n >>> pvc.inputs.fwhm_z = 2.0\n >>> outs = pvc.run() #doctest: +SKIP\n\n References\n ----------\n .. [1] K. Erlandsson, I. Buvat, P. H. Pretorius, B. A. Thomas, and B. F. Hutton,\n \"A review of partial volume correction techniques for emission tomography\n and their applications in neurology, cardiology and oncology,\" Phys. Med.\n Biol., vol. 57, no. 21, p. R119, 2012.\n .. [2] https://github.com/UCL/PETPVC\n\n \"\"\"\n\n input_spec = PETPVCInputSpec\n output_spec = PETPVCOutputSpec\n _cmd = \"petpvc\"\n\n _references = [\n {\n \"entry\": BibTeX(\n \"@article{0031-9155-61-22-7975,\"\n \"author={Benjamin A Thomas and Vesna Cuplov and Alexandre Bousse and \"\n \"Adriana Mendes and Kris Thielemans and Brian F Hutton and Kjell Erlandsson},\"\n \"title={PETPVC: a toolbox for performing partial volume correction \"\n \"techniques in positron emission tomography},\"\n \"journal={Physics in Medicine and Biology},\"\n \"volume={61},\"\n \"number={22},\"\n \"pages={7975},\"\n \"url={http://stacks.iop.org/0031-9155/61/i=22/a=7975},\"\n \"doi={https://doi.org/10.1088/0031-9155/61/22/7975},\"\n \"year={2016},\"\n \"}\"\n ),\n \"description\": \"PETPVC software implementation publication\",\n \"tags\": [\"implementation\"],\n }\n ]\n\n def _list_outputs(self):\n outputs = self.output_spec().get()\n outputs[\"out_file\"] = self.inputs.out_file\n if not isdefined(outputs[\"out_file\"]):\n method_name = self.inputs.pvc.lower()\n outputs[\"out_file\"] = self._gen_fname(\n self.inputs.in_file, suffix=f\"_{method_name}_pvc\"\n )\n\n outputs[\"out_file\"] = os.path.abspath(outputs[\"out_file\"])\n return outputs\n\n def _gen_fname(\n self, basename, cwd=None, suffix=None, change_ext=True, ext=\".nii.gz\"\n ):\n \"\"\"Generate a filename based on the given parameters.\n\n The filename will take the form: cwd/basename<suffix><ext>.\n If change_ext is True, it will use the extensions specified in\n <instance>inputs.output_type.\n\n Parameters\n ----------\n basename : str\n Filename to base the new filename on.\n cwd : str\n Path to prefix to the new filename. (default is os.getcwd())\n suffix : str\n Suffix to add to the `basename`. (defaults is '' )\n change_ext : bool\n Flag to change the filename extension to the given `ext`.\n (Default is False)\n\n Returns\n -------\n fname : str\n New filename based on given parameters.\n\n \"\"\"\n if basename == \"\":\n msg = \"Unable to generate filename for command %s. \" % self.cmd\n msg += \"basename is not set!\"\n raise ValueError(msg)\n if cwd is None:\n cwd = os.getcwd()\n if change_ext:\n if suffix:\n suffix = \"\".join((suffix, ext))\n else:\n suffix = ext\n if suffix is None:\n suffix = \"\"\n fname = fname_presuffix(basename, suffix=suffix, use_ext=False, newpath=cwd)\n return fname\n\n def _gen_filename(self, name):\n if name == \"out_file\":\n return self._list_outputs()[\"out_file\"]\n return None\n",
"path": "nipype/interfaces/petpvc.py"
}
] | diff --git a/nipype/interfaces/petpvc.py b/nipype/interfaces/petpvc.py
index dbd1d805ed..ac388f6b51 100644
--- a/nipype/interfaces/petpvc.py
+++ b/nipype/interfaces/petpvc.py
@@ -37,6 +37,7 @@
     "RBV+VC",
     "RL",
     "VC",
+    "STC",
 ]
 
 
@@ -75,6 +76,7 @@ class PETPVCInputSpec(CommandLineInputSpec):
  * Muller Gartner -- ``MG``
  * Muller Gartner with Van-Cittert -- ``MG+VC``
  * Muller Gartner with Richardson-Lucy -- ``MG+RL``
+ * Single-target correction -- ``STC``
 
 """,
     )
| ENH: add STC partial volume correction to PETPVC interface
### Summary
Single-target correction (STC) has been added to PETPVC as a partial volume correction method since the Nipype PETPVC interface was created, so it would be ideal if the interface supported it as well.
### Actual behavior
The 'pvc' input of the PETPVC interface does not offer an 'STC' option.
### Expected behavior
The interface should include the 'STC' option for the 'pvc' flag.
|
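For context, here is a usage sketch adapted from the interface's own docstring example, selecting the newly added `STC` method. The file names are placeholders (the `File` inputs are declared with `exists=True`, so they must point to real images), and running the command requires the external `petpvc` tool.

```python
from nipype.interfaces.petpvc import PETPVC

# Placeholder paths adapted from the PETPVC docstring example; point these
# at real NIfTI files before running.
pvc = PETPVC()
pvc.inputs.in_file = "pet.nii.gz"
pvc.inputs.mask_file = "tissues.nii.gz"
pvc.inputs.out_file = "pet_pvc_stc.nii.gz"
pvc.inputs.pvc = "STC"  # single-target correction, enabled by this change
pvc.inputs.fwhm_x = 2.0
pvc.inputs.fwhm_y = 2.0
pvc.inputs.fwhm_z = 2.0

print(pvc.cmdline)  # the generated petpvc call, including "-p STC"
# outs = pvc.run()  # requires the petpvc binary on PATH
```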
Pyomo__pyomo-797 | [
{
"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\n#\n# Problem Writer for GAMS Format Files\n#\n\nfrom six import StringIO, string_types, iteritems\nfrom six.moves import xrange\n\nfrom pyutilib.misc import PauseGC\n\nfrom pyomo.core.expr import current as EXPR\nfrom pyomo.core.expr.numvalue import (\n is_fixed, value, as_numeric, native_types, native_numeric_types)\nfrom pyomo.core.base import (\n SymbolMap, ShortNameLabeler, NumericLabeler, Block, Constraint, Expression,\n Objective, Var, Param, minimize, Suffix, SortComponents)\nfrom pyomo.core.base.component import ActiveComponent\nfrom pyomo.core.kernel.base import ICategorizedObject\nfrom pyomo.opt import ProblemFormat\nfrom pyomo.opt.base import AbstractProblemWriter, WriterFactory\nfrom pyomo.repn.util import valid_expr_ctypes_minlp, \\\n valid_active_ctypes_minlp\n\nimport logging\n\nlogger = logging.getLogger('pyomo.core')\n\n#\n# A visitor pattern that creates a string for an expression\n# that is compatible with the GAMS syntax.\n#\nclass ToGamsVisitor(EXPR.ExpressionValueVisitor):\n\n def __init__(self, smap, treechecker):\n super(ToGamsVisitor, self).__init__()\n self.smap = smap\n self.treechecker = treechecker\n\n def visit(self, node, values):\n \"\"\" Visit nodes that have been expanded \"\"\"\n tmp = []\n for i,val in enumerate(values):\n arg = node._args_[i]\n\n if arg is None:\n tmp.append('Undefined')\n elif arg.__class__ in native_numeric_types:\n if arg < 0:\n # Wrap negative values in parens to avoid double operator\n tmp.append(\"(%s)\" % val)\n else:\n tmp.append(val)\n elif arg.__class__ in native_types:\n tmp.append(\"'{0}'\".format(val))\n elif arg.is_variable_type():\n if arg.is_fixed():\n # bind fixed var values in parens to avoid double negatives\n tmp.append(\"(%s)\" % val)\n else:\n tmp.append(val)\n elif (arg.is_expression_type() and\n node._precedence() < arg._precedence()):\n tmp.append(\"({0})\".format(val))\n else:\n tmp.append(val)\n\n if node.__class__ is EXPR.PowExpression:\n # If the exponent is a positive integer, use the power() function.\n # Otherwise, use the ** operator.\n exponent = node.arg(1)\n if (exponent.__class__ in native_numeric_types and\n exponent == int(exponent)):\n return \"power({0}, {1})\".format(tmp[0], tmp[1])\n else:\n return \"{0} ** {1}\".format(tmp[0], tmp[1])\n else:\n return node._to_string(tmp, None, self.smap, True)\n\n def visiting_potential_leaf(self, node):\n \"\"\"\n Visiting a potential leaf.\n\n Return True if the node is not expanded.\n \"\"\"\n if node is None:\n return True, None\n\n if node.__class__ in native_types:\n return True, str(node)\n\n if node.is_expression_type():\n # we will descend into this, so type checking will happen later\n if node.is_component_type():\n self.treechecker(node)\n return False, None\n\n if node.is_component_type():\n if self.ctype(node) not in valid_expr_ctypes_minlp:\n # Make sure all components in active constraints\n # are basic ctypes we know how to deal with.\n raise RuntimeError(\n \"Unallowable component '%s' of type %s found in an 
active \"\n \"constraint or objective.\\nThe GAMS writer cannot export \"\n \"expressions with this component type.\"\n % (node.name, self.ctype(node).__name__))\n if self.ctype(node) is not Var:\n # For these, make sure it's on the right model. We can check\n # Vars later since they don't disappear from the expressions\n self.treechecker(node)\n\n if node.is_variable_type():\n if node.fixed:\n return True, str(value(node))\n label = self.smap.getSymbol(node)\n return True, label\n\n return True, str(value(node))\n\n def ctype(self, comp):\n if isinstance(comp, ICategorizedObject):\n return comp.ctype\n else:\n return comp.type()\n\n\ndef expression_to_string(expr, treechecker, labeler=None, smap=None):\n if labeler is not None:\n if smap is None:\n smap = SymbolMap()\n smap.default_labeler = labeler\n visitor = ToGamsVisitor(smap, treechecker)\n return visitor.dfs_postorder_stack(expr)\n\n\nclass Categorizer(object):\n \"\"\"Class for representing categorized variables.\n\n Given a list of variable names and a symbol map, categorizes the variable\n names into the categories: binary, ints, positive and reals.\n\n \"\"\"\n\n def __init__(self, var_list, symbol_map):\n self.binary = []\n self.ints = []\n self.positive = []\n self.reals = []\n\n # categorize variables\n for var in var_list:\n v = symbol_map.getObject(var)\n if v.is_binary():\n self.binary.append(var)\n elif v.is_integer():\n if (v.has_lb() and (value(v.lb) >= 0)) and \\\n (v.has_ub() and (value(v.ub) <= 1)):\n self.binary.append(var)\n else:\n self.ints.append(var)\n elif value(v.lb) == 0:\n self.positive.append(var)\n else:\n self.reals.append(var)\n\n def __iter__(self):\n \"\"\"Iterate over all variables.\n\n Yield a tuple containing the variables category and its name.\n \"\"\"\n for category in ['binary', 'ints', 'positive', 'reals']:\n var_list = getattr(self, category)\n for var_name in var_list:\n yield category, var_name\n\n\nclass StorageTreeChecker(object):\n def __init__(self, model):\n # blocks are hashable so we can use a normal set\n self.tree = {model}\n self.model = model\n # add everything above the model\n pb = self.parent_block(model)\n while pb is not None:\n self.tree.add(pb)\n pb = self.parent_block(pb)\n\n def __call__(self, comp, exception_flag=True):\n if comp is self.model:\n return True\n\n # walk up tree until there are no more parents\n seen = set()\n pb = self.parent_block(comp)\n while pb is not None:\n if pb in self.tree:\n self.tree.update(seen)\n return True\n seen.add(pb)\n pb = self.parent_block(pb)\n\n if exception_flag:\n self.raise_error(comp)\n else:\n return False\n\n def parent_block(self, comp):\n if isinstance(comp, ICategorizedObject):\n parent = comp.parent\n while (parent is not None) and \\\n (not parent._is_heterogeneous_container):\n parent = parent.parent\n return parent\n else:\n return comp.parent_block()\n\n def raise_error(self, comp):\n raise RuntimeError(\n \"GAMS writer: found component '%s' not on same model tree.\\n\"\n \"All components must have the same parent model.\" % comp.name)\n\n\ndef split_long_line(line):\n \"\"\"\n GAMS has an 80,000 character limit for lines, so split as many\n times as needed so as to not have illegal lines.\n \"\"\"\n new_lines = ''\n while len(line) > 80000:\n i = 80000\n while line[i] != ' ':\n # Walk backwards to find closest space,\n # where it is safe to split to a new line\n if i < 0:\n raise RuntimeError(\n \"Found an 80,000+ character string with no spaces\")\n i -= 1\n new_lines += line[:i] + '\\n'\n line = line[i + 1:]\n 
new_lines += line\n return new_lines\n\n\ndef _get_bound(exp):\n if exp is None:\n return None\n if is_fixed(exp):\n return value(exp)\n raise ValueError(\"non-fixed bound or weight: \" + str(exp))\n\n\[email protected]('gams', 'Generate the corresponding GAMS file')\nclass ProblemWriter_gams(AbstractProblemWriter):\n\n def __init__(self):\n AbstractProblemWriter.__init__(self, ProblemFormat.gams)\n\n def __call__(self,\n model,\n output_filename,\n solver_capability,\n io_options):\n \"\"\"\n Write a model in the GAMS modeling language format.\n\n Keyword Arguments\n -----------------\n output_filename: str\n Name of file to write GAMS model to. Optionally pass a file-like\n stream and the model will be written to that instead.\n io_options: dict\n - warmstart=True\n Warmstart by initializing model's variables to their values.\n - symbolic_solver_labels=False\n Use full Pyomo component names rather than\n shortened symbols (slower, but useful for debugging).\n - labeler=None\n Custom labeler. Incompatible with symbolic_solver_labels.\n - solver=None\n If None, GAMS will use default solver for model type.\n - mtype=None\n Model type. If None, will chose from lp, nlp, mip, and minlp.\n - add_options=None\n List of additional lines to write directly\n into model file before the solve statement.\n For model attributes, <model name> is GAMS_MODEL.\n - skip_trivial_constraints=False\n Skip writing constraints whose body section is fixed.\n - file_determinism=1\n | How much effort do we want to put into ensuring the\n | GAMS file is written deterministically for a Pyomo model:\n | 0 : None\n | 1 : sort keys of indexed components (default)\n | 2 : sort keys AND sort names (over declaration order)\n - put_results=None\n Filename for optionally writing solution values and\n marginals to (put_results).dat, and solver statuses\n to (put_results + 'stat').dat.\n \"\"\"\n\n # Make sure not to modify the user's dictionary,\n # they may be reusing it outside of this call\n io_options = dict(io_options)\n\n # Use full Pyomo component names rather than\n # shortened symbols (slower, but useful for debugging).\n symbolic_solver_labels = io_options.pop(\"symbolic_solver_labels\", False)\n\n # Custom labeler option. 
Incompatible with symbolic_solver_labels.\n labeler = io_options.pop(\"labeler\", None)\n\n # If None, GAMS will use default solver for model type.\n solver = io_options.pop(\"solver\", None)\n\n # If None, will chose from lp, nlp, mip, and minlp.\n mtype = io_options.pop(\"mtype\", None)\n\n # Lines to add before solve statement.\n add_options = io_options.pop(\"add_options\", None)\n\n # Skip writing constraints whose body section is\n # fixed (i.e., no variables)\n skip_trivial_constraints = \\\n io_options.pop(\"skip_trivial_constraints\", False)\n\n # How much effort do we want to put into ensuring the\n # GAMS file is written deterministically for a Pyomo model:\n # 0 : None\n # 1 : sort keys of indexed components (default)\n # 2 : sort keys AND sort names (over declaration order)\n file_determinism = io_options.pop(\"file_determinism\", 1)\n sorter_map = {0:SortComponents.unsorted,\n 1:SortComponents.deterministic,\n 2:SortComponents.sortBoth}\n sort = sorter_map[file_determinism]\n\n # Warmstart by initializing model's variables to their values.\n warmstart = io_options.pop(\"warmstart\", True)\n\n # Filename for optionally writing solution values and marginals\n # Set to True by GAMSSolver\n put_results = io_options.pop(\"put_results\", None)\n\n if len(io_options):\n raise ValueError(\n \"GAMS writer passed unrecognized io_options:\\n\\t\" +\n \"\\n\\t\".join(\"%s = %s\"\n % (k,v) for k,v in iteritems(io_options)))\n\n if solver is not None and solver.upper() not in valid_solvers:\n raise ValueError(\n \"GAMS writer passed unrecognized solver: %s\" % solver)\n\n if mtype is not None:\n valid_mtypes = set([\n 'lp', 'qcp', 'nlp', 'dnlp', 'rmip', 'mip', 'rmiqcp', 'rminlp',\n 'miqcp', 'minlp', 'rmpec', 'mpec', 'mcp', 'cns', 'emp'])\n if mtype.lower() not in valid_mtypes:\n raise ValueError(\"GAMS writer passed unrecognized \"\n \"model type: %s\" % mtype)\n if (solver is not None and\n mtype.upper() not in valid_solvers[solver.upper()]):\n raise ValueError(\"GAMS writer passed solver (%s) \"\n \"unsuitable for given model type (%s)\"\n % (solver, mtype))\n\n if output_filename is None:\n output_filename = model.name + \".gms\"\n\n if symbolic_solver_labels and (labeler is not None):\n raise ValueError(\"GAMS writer: Using both the \"\n \"'symbolic_solver_labels' and 'labeler' \"\n \"I/O options is forbidden\")\n\n if symbolic_solver_labels:\n var_labeler = con_labeler = ShortNameLabeler(63, '_')\n elif labeler is None:\n var_labeler = NumericLabeler('x')\n con_labeler = NumericLabeler('c')\n else:\n var_labeler = con_labeler = labeler\n\n var_list = []\n\n def var_recorder(obj):\n ans = var_labeler(obj)\n try:\n if obj.is_variable_type():\n var_list.append(ans)\n except:\n pass\n return ans\n\n def var_label(obj):\n #if obj.is_fixed():\n # return str(value(obj))\n return symbolMap.getSymbol(obj, var_recorder)\n\n symbolMap = SymbolMap(var_label)\n\n # when sorting, there are a non-trivial number of\n # temporary objects created. 
these all yield\n # non-circular references, so disable GC - the\n # overhead is non-trivial, and because references\n # are non-circular, everything will be collected\n # immediately anyway.\n with PauseGC() as pgc:\n try:\n if isinstance(output_filename, string_types):\n output_file = open(output_filename, \"w\")\n else:\n # Support passing of stream such as a StringIO\n # on which to write the model file\n output_file = output_filename\n self._write_model(\n model=model,\n output_file=output_file,\n solver_capability=solver_capability,\n var_list=var_list,\n var_label=var_label,\n symbolMap=symbolMap,\n con_labeler=con_labeler,\n sort=sort,\n skip_trivial_constraints=skip_trivial_constraints,\n warmstart=warmstart,\n solver=solver,\n mtype=mtype,\n add_options=add_options,\n put_results=put_results\n )\n finally:\n if isinstance(output_filename, string_types):\n output_file.close()\n\n return output_filename, symbolMap\n\n def _write_model(self,\n model,\n output_file,\n solver_capability,\n var_list,\n var_label,\n symbolMap,\n con_labeler,\n sort,\n skip_trivial_constraints,\n warmstart,\n solver,\n mtype,\n add_options,\n put_results):\n constraint_names = []\n ConstraintIO = StringIO()\n linear = True\n linear_degree = set([0,1])\n\n # Make sure there are no strange ActiveComponents. The expression\n # walker will handle strange things in constraints later.\n model_ctypes = model.collect_ctypes(active=True)\n invalids = set()\n for t in (model_ctypes - valid_active_ctypes_minlp):\n if issubclass(t, ActiveComponent):\n invalids.add(t)\n if len(invalids):\n invalids = [t.__name__ for t in invalids]\n raise RuntimeError(\n \"Unallowable active component(s) %s.\\nThe GAMS writer cannot \"\n \"export models with this component type.\" %\n \", \".join(invalids))\n\n tc = StorageTreeChecker(model)\n\n # Walk through the model and generate the constraint definition\n # for all active constraints. Any Vars / Expressions that are\n # encountered will be added to the var_list due to the labeler\n # defined above.\n for con in model.component_data_objects(Constraint,\n active=True,\n sort=sort):\n\n if not con.has_lb() and not con.has_ub():\n assert not con.equality\n continue # non-binding, so skip\n\n con_body = as_numeric(con.body)\n if skip_trivial_constraints and con_body.is_fixed():\n continue\n if linear:\n if con_body.polynomial_degree() not in linear_degree:\n linear = False\n\n cName = symbolMap.getSymbol(con, con_labeler)\n if con.equality:\n constraint_names.append('%s' % cName)\n ConstraintIO.write('%s.. %s =e= %s ;\\n' % (\n constraint_names[-1],\n expression_to_string(con_body, tc, smap=symbolMap),\n _get_bound(con.upper)\n ))\n else:\n if con.has_lb():\n constraint_names.append('%s_lo' % cName)\n ConstraintIO.write('%s.. %s =l= %s ;\\n' % (\n constraint_names[-1],\n _get_bound(con.lower),\n expression_to_string(con_body, tc, smap=symbolMap)\n ))\n if con.has_ub():\n constraint_names.append('%s_hi' % cName)\n ConstraintIO.write('%s.. %s =l= %s ;\\n' % (\n constraint_names[-1],\n expression_to_string(con_body, tc, smap=symbolMap),\n _get_bound(con.upper)\n ))\n\n obj = list(model.component_data_objects(Objective,\n active=True,\n sort=sort))\n if len(obj) != 1:\n raise RuntimeError(\n \"GAMS writer requires exactly one active objective (found %s)\"\n % (len(obj)))\n obj = obj[0]\n if linear:\n if obj.expr.polynomial_degree() not in linear_degree:\n linear = False\n oName = symbolMap.getSymbol(obj, con_labeler)\n constraint_names.append(oName)\n ConstraintIO.write('%s.. 
GAMS_OBJECTIVE =e= %s ;\\n' % (\n oName,\n expression_to_string(obj.expr, tc, smap=symbolMap)\n ))\n\n # Categorize the variables that we found\n categorized_vars = Categorizer(var_list, symbolMap)\n\n # Write the GAMS model\n # $offdigit ignores extra precise digits instead of erroring\n output_file.write(\"$offdigit\\n\\n\")\n output_file.write(\"EQUATIONS\\n\\t\")\n output_file.write(\"\\n\\t\".join(constraint_names))\n if categorized_vars.binary:\n output_file.write(\";\\n\\nBINARY VARIABLES\\n\\t\")\n output_file.write(\"\\n\\t\".join(categorized_vars.binary))\n if categorized_vars.ints:\n output_file.write(\";\\n\\nINTEGER VARIABLES\")\n output_file.write(\"\\n\\t\")\n output_file.write(\"\\n\\t\".join(categorized_vars.ints))\n if categorized_vars.positive:\n output_file.write(\";\\n\\nPOSITIVE VARIABLES\\n\\t\")\n output_file.write(\"\\n\\t\".join(categorized_vars.positive))\n output_file.write(\";\\n\\nVARIABLES\\n\\tGAMS_OBJECTIVE\\n\\t\")\n output_file.write(\"\\n\\t\".join(categorized_vars.reals))\n output_file.write(\";\\n\\n\")\n\n for line in ConstraintIO.getvalue().splitlines():\n if len(line) > 80000:\n line = split_long_line(line)\n output_file.write(line + \"\\n\")\n\n output_file.write(\"\\n\")\n\n warn_int_bounds = False\n for category, var_name in categorized_vars:\n var = symbolMap.getObject(var_name)\n tc(var)\n if category == 'positive':\n if var.has_ub():\n output_file.write(\"%s.up = %s;\\n\" %\n (var_name, _get_bound(var.ub)))\n elif category == 'ints':\n if not var.has_lb():\n warn_int_bounds = True\n # GAMS doesn't allow -INF lower bound for ints\n logger.warning(\"Lower bound for integer variable %s set \"\n \"to -1.0E+100.\" % var.name)\n output_file.write(\"%s.lo = -1.0E+100;\\n\" % (var_name))\n elif value(var.lb) != 0:\n output_file.write(\"%s.lo = %s;\\n\" %\n (var_name, _get_bound(var.lb)))\n if not var.has_ub():\n warn_int_bounds = True\n # GAMS has an option value called IntVarUp that is the\n # default upper integer bound, which it applies if the\n # integer's upper bound is INF. This option maxes out at\n # 2147483647, so we can go higher by setting the bound.\n logger.warning(\"Upper bound for integer variable %s set \"\n \"to +1.0E+100.\" % var.name)\n output_file.write(\"%s.up = +1.0E+100;\\n\" % (var_name))\n else:\n output_file.write(\"%s.up = %s;\\n\" %\n (var_name, _get_bound(var.ub)))\n elif category == 'binary':\n if var.has_lb() and value(var.lb) != 0:\n output_file.write(\"%s.lo = %s;\\n\" %\n (var_name, _get_bound(var.lb)))\n if var.has_ub() and value(var.ub) != 1:\n output_file.write(\"%s.up = %s;\\n\" %\n (var_name, _get_bound(var.ub)))\n elif category == 'reals':\n if var.has_lb():\n output_file.write(\"%s.lo = %s;\\n\" %\n (var_name, _get_bound(var.lb)))\n if var.has_ub():\n output_file.write(\"%s.up = %s;\\n\" %\n (var_name, _get_bound(var.ub)))\n else:\n raise KeyError('Category %s not supported' % category)\n if warmstart and var.value is not None:\n output_file.write(\"%s.l = %s;\\n\" % (var_name, var.value))\n\n if warn_int_bounds:\n logger.warning(\n \"GAMS requires finite bounds for integer variables. 1.0E100 \"\n \"is as extreme as GAMS will define, and should be enough to \"\n \"appear unbounded. 
If the solver cannot handle this bound, \"\n \"explicitly set a smaller bound on the pyomo model, or try a \"\n \"different GAMS solver.\")\n\n model_name = \"GAMS_MODEL\"\n output_file.write(\"\\nMODEL %s /all/ ;\\n\" % model_name)\n\n if mtype is None:\n mtype = ('lp','nlp','mip','minlp')[\n (0 if linear else 1) +\n (2 if (categorized_vars.binary or categorized_vars.ints)\n else 0)]\n\n if solver is not None:\n if mtype.upper() not in valid_solvers[solver.upper()]:\n raise ValueError(\"GAMS writer passed solver (%s) \"\n \"unsuitable for model type (%s)\"\n % (solver, mtype))\n output_file.write(\"option %s=%s;\\n\" % (mtype, solver))\n\n if add_options is not None:\n output_file.write(\"\\n* START USER ADDITIONAL OPTIONS\\n\")\n for line in add_options:\n output_file.write('\\n' + line)\n output_file.write(\"\\n\\n* END USER ADDITIONAL OPTIONS\\n\\n\")\n\n output_file.write(\n \"SOLVE %s USING %s %simizing GAMS_OBJECTIVE;\\n\\n\"\n % ( model_name,\n mtype,\n 'min' if obj.sense == minimize else 'max'))\n\n # Set variables to store certain statuses and attributes\n stat_vars = ['MODELSTAT', 'SOLVESTAT', 'OBJEST', 'OBJVAL', 'NUMVAR',\n 'NUMEQU', 'NUMDVAR', 'NUMNZ', 'ETSOLVE']\n output_file.write(\"Scalars MODELSTAT 'model status', \"\n \"SOLVESTAT 'solve status';\\n\")\n output_file.write(\"MODELSTAT = %s.modelstat;\\n\" % model_name)\n output_file.write(\"SOLVESTAT = %s.solvestat;\\n\\n\" % model_name)\n\n output_file.write(\"Scalar OBJEST 'best objective', \"\n \"OBJVAL 'objective value';\\n\")\n output_file.write(\"OBJEST = %s.objest;\\n\" % model_name)\n output_file.write(\"OBJVAL = %s.objval;\\n\\n\" % model_name)\n\n output_file.write(\"Scalar NUMVAR 'number of variables';\\n\")\n output_file.write(\"NUMVAR = %s.numvar\\n\\n\" % model_name)\n\n output_file.write(\"Scalar NUMEQU 'number of equations';\\n\")\n output_file.write(\"NUMEQU = %s.numequ\\n\\n\" % model_name)\n\n output_file.write(\"Scalar NUMDVAR 'number of discrete variables';\\n\")\n output_file.write(\"NUMDVAR = %s.numdvar\\n\\n\" % model_name)\n\n output_file.write(\"Scalar NUMNZ 'number of nonzeros';\\n\")\n output_file.write(\"NUMNZ = %s.numnz\\n\\n\" % model_name)\n\n output_file.write(\"Scalar ETSOLVE 'time to execute solve statement';\\n\")\n output_file.write(\"ETSOLVE = %s.etsolve\\n\\n\" % model_name)\n\n if put_results is not None:\n results = put_results + '.dat'\n output_file.write(\"\\nfile results /'%s'/;\" % results)\n output_file.write(\"\\nresults.nd=15;\")\n output_file.write(\"\\nresults.nw=21;\")\n output_file.write(\"\\nput results;\")\n output_file.write(\"\\nput 'SYMBOL : LEVEL : MARGINAL' /;\")\n for var in var_list:\n output_file.write(\"\\nput %s %s.l %s.m /;\" % (var, var, var))\n for con in constraint_names:\n output_file.write(\"\\nput %s %s.l %s.m /;\" % (con, con, con))\n output_file.write(\"\\nput GAMS_OBJECTIVE GAMS_OBJECTIVE.l \"\n \"GAMS_OBJECTIVE.m;\\n\")\n\n statresults = put_results + 'stat.dat'\n output_file.write(\"\\nfile statresults /'%s'/;\" % statresults)\n output_file.write(\"\\nstatresults.nd=15;\")\n output_file.write(\"\\nstatresults.nw=21;\")\n output_file.write(\"\\nput statresults;\")\n output_file.write(\"\\nput 'SYMBOL : VALUE' /;\")\n for stat in stat_vars:\n output_file.write(\"\\nput '%s' %s /;\\n\" % (stat, stat))\n\n\nvalid_solvers = {\n'ALPHAECP': {'MINLP','MIQCP'},\n'AMPL': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP'},\n'ANTIGONE': {'NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'BARON': 
{'LP','MIP','RMIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'BDMLP': {'LP','MIP','RMIP'},\n'BDMLPD': {'LP','RMIP'},\n'BENCH': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'BONMIN': {'MINLP','MIQCP'},\n'BONMINH': {'MINLP','MIQCP'},\n'CBC': {'LP','MIP','RMIP'},\n'COINBONMIN': {'MINLP','MIQCP'},\n'COINCBC': {'LP','MIP','RMIP'},\n'COINCOUENNE': {'NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'COINIPOPT': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'COINOS': {'LP','MIP','RMIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'COINSCIP': {'MIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'CONOPT': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'CONOPT3': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'CONOPT4': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'CONOPTD': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'CONVERT': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'CONVERTD': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP','EMP'},\n'COUENNE': {'NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'CPLEX': {'LP','MIP','RMIP','QCP','MIQCP','RMIQCP'},\n'CPLEXD': {'LP','MIP','RMIP','QCP','MIQCP','RMIQCP'},\n'CPOPTIMIZER': {'MIP','MINLP','MIQCP'},\n'DE': {'EMP'},\n'DECIS': {'EMP'},\n'DECISC': {'LP'},\n'DECISM': {'LP'},\n'DICOPT': {'MINLP','MIQCP'},\n'DICOPTD': {'MINLP','MIQCP'},\n'EXAMINER': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'EXAMINER2': {'LP','MIP','RMIP','NLP','MCP','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'GAMSCHK': {'LP','MIP','RMIP','NLP','MCP','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'GLOMIQO': {'QCP','MIQCP','RMIQCP'},\n'GUROBI': {'LP','MIP','RMIP','QCP','MIQCP','RMIQCP'},\n'GUSS': {'LP', 'MIP', 'NLP', 'MCP', 'CNS', 'DNLP', 'MINLP', 'QCP', 'MIQCP'},\n'IPOPT': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'IPOPTH': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'JAMS': {'EMP'},\n'KESTREL': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP','EMP'},\n'KNITRO': {'LP','RMIP','NLP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'LGO': {'LP','RMIP','NLP','DNLP','RMINLP','QCP','RMIQCP'},\n'LGOD': {'LP','RMIP','NLP','DNLP','RMINLP','QCP','RMIQCP'},\n'LINDO': {'LP','MIP','RMIP','NLP','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP','EMP'},\n'LINDOGLOBAL': {'LP','MIP','RMIP','NLP','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'LINGO': {'LP','MIP','RMIP','NLP','DNLP','RMINLP','MINLP'},\n'LOCALSOLVER': {'MIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'LOGMIP': {'EMP'},\n'LS': {'LP','RMIP'},\n'MILES': {'MCP'},\n'MILESE': {'MCP'},\n'MINOS': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'MINOS5': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'MINOS55': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'MOSEK': {'LP','MIP','RMIP','NLP','DNLP','RMINLP','QCP','MIQCP','RMIQCP'},\n'MPECDUMP': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP'},\n'MPSGE': {},\n'MSNLP': {'NLP','DNLP','RMINLP','QCP','RMIQCP'},\n'NLPEC': {'MCP','MPEC','RMPEC'},\n'OQNLP': {'NLP', 'DNLP', 'MINLP', 'QCP', 'MIQCP'},\n'OS': 
{'LP','MIP','RMIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'OSICPLEX': {'LP','MIP','RMIP'},\n'OSIGUROBI': {'LP','MIP','RMIP'},\n'OSIMOSEK': {'LP','MIP','RMIP'},\n'OSISOPLEX': {'LP','RMIP'},\n'OSIXPRESS': {'LP','MIP','RMIP'},\n'PATH': {'MCP','CNS'},\n'PATHC': {'MCP','CNS'},\n'PATHNLP': {'LP','RMIP','NLP','DNLP','RMINLP','QCP','RMIQCP'},\n'PYOMO': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP'},\n'QUADMINOS': {'LP'},\n'SBB': {'MINLP','MIQCP'},\n'SCENSOLVER': {'LP','MIP','RMIP','NLP','MCP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'SCIP': {'MIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'SNOPT': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'SOPLEX': {'LP','RMIP'},\n'XA': {'LP','MIP','RMIP'},\n'XPRESS': {'LP','MIP','RMIP','QCP','MIQCP','RMIQCP'}\n}\n",
"path": "pyomo/repn/plugins/gams_writer.py"
}
] | [
{
"content": "# ___________________________________________________________________________\n#\n# Pyomo: Python Optimization Modeling Objects\n# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC\n# Under the terms of Contract DE-NA0003525 with National Technology and\n# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain\n# rights in this software.\n# This software is distributed under the 3-clause BSD License.\n# ___________________________________________________________________________\n\n#\n# Problem Writer for GAMS Format Files\n#\n\nfrom six import StringIO, string_types, iteritems\nfrom six.moves import xrange\n\nfrom pyutilib.misc import PauseGC\n\nfrom pyomo.core.expr import current as EXPR\nfrom pyomo.core.expr.numvalue import (\n is_fixed, value, as_numeric, native_types, native_numeric_types)\nfrom pyomo.core.base import (\n SymbolMap, ShortNameLabeler, NumericLabeler, Block, Constraint, Expression,\n Objective, Var, Param, minimize, Suffix, SortComponents)\nfrom pyomo.core.base.component import ActiveComponent\nfrom pyomo.core.kernel.base import ICategorizedObject\nfrom pyomo.opt import ProblemFormat\nfrom pyomo.opt.base import AbstractProblemWriter, WriterFactory\nfrom pyomo.repn.util import valid_expr_ctypes_minlp, \\\n valid_active_ctypes_minlp\n\nimport logging\n\nlogger = logging.getLogger('pyomo.core')\n\n#\n# A visitor pattern that creates a string for an expression\n# that is compatible with the GAMS syntax.\n#\nclass ToGamsVisitor(EXPR.ExpressionValueVisitor):\n\n def __init__(self, smap, treechecker):\n super(ToGamsVisitor, self).__init__()\n self.smap = smap\n self.treechecker = treechecker\n\n def visit(self, node, values):\n \"\"\" Visit nodes that have been expanded \"\"\"\n tmp = []\n for i,val in enumerate(values):\n arg = node._args_[i]\n\n if arg is None:\n tmp.append('Undefined')\n elif arg.__class__ in native_numeric_types:\n if arg < 0:\n # Wrap negative values in parens to avoid double operator\n tmp.append(\"(%s)\" % val)\n else:\n tmp.append(val)\n elif arg.__class__ in native_types:\n tmp.append(\"'{0}'\".format(val))\n elif arg.is_variable_type():\n if arg.is_fixed():\n # bind fixed var values in parens to avoid double negatives\n tmp.append(\"(%s)\" % val)\n else:\n tmp.append(val)\n elif (arg.is_expression_type() and\n node._precedence() < arg._precedence()):\n tmp.append(\"({0})\".format(val))\n else:\n tmp.append(val)\n\n if node.__class__ is EXPR.PowExpression:\n # If the exponent is a positive integer, use the power() function.\n # Otherwise, use the ** operator.\n exponent = node.arg(1)\n if (exponent.__class__ in native_numeric_types and\n exponent == int(exponent)):\n return \"power({0}, {1})\".format(tmp[0], tmp[1])\n else:\n return \"{0} ** {1}\".format(tmp[0], tmp[1])\n else:\n return node._to_string(tmp, None, self.smap, True)\n\n def visiting_potential_leaf(self, node):\n \"\"\"\n Visiting a potential leaf.\n\n Return True if the node is not expanded.\n \"\"\"\n if node is None:\n return True, None\n\n if node.__class__ in native_types:\n return True, str(node)\n\n if node.is_expression_type():\n # we will descend into this, so type checking will happen later\n if node.is_component_type():\n self.treechecker(node)\n return False, None\n\n if node.is_component_type():\n if self.ctype(node) not in valid_expr_ctypes_minlp:\n # Make sure all components in active constraints\n # are basic ctypes we know how to deal with.\n raise RuntimeError(\n \"Unallowable component '%s' of type %s found in an 
active \"\n \"constraint or objective.\\nThe GAMS writer cannot export \"\n \"expressions with this component type.\"\n % (node.name, self.ctype(node).__name__))\n if self.ctype(node) is not Var:\n # For these, make sure it's on the right model. We can check\n # Vars later since they don't disappear from the expressions\n self.treechecker(node)\n\n if node.is_variable_type():\n if node.fixed:\n return True, str(value(node))\n label = self.smap.getSymbol(node)\n return True, label\n\n return True, str(value(node))\n\n def ctype(self, comp):\n if isinstance(comp, ICategorizedObject):\n return comp.ctype\n else:\n return comp.type()\n\n\ndef expression_to_string(expr, treechecker, labeler=None, smap=None):\n if labeler is not None:\n if smap is None:\n smap = SymbolMap()\n smap.default_labeler = labeler\n visitor = ToGamsVisitor(smap, treechecker)\n return visitor.dfs_postorder_stack(expr)\n\n\nclass Categorizer(object):\n \"\"\"Class for representing categorized variables.\n\n Given a list of variable names and a symbol map, categorizes the variable\n names into the categories: binary, ints, positive and reals.\n\n \"\"\"\n\n def __init__(self, var_list, symbol_map):\n self.binary = []\n self.ints = []\n self.positive = []\n self.reals = []\n\n # categorize variables\n for var in var_list:\n v = symbol_map.getObject(var)\n if v.is_binary():\n self.binary.append(var)\n elif v.is_integer():\n if (v.has_lb() and (value(v.lb) >= 0)) and \\\n (v.has_ub() and (value(v.ub) <= 1)):\n self.binary.append(var)\n else:\n self.ints.append(var)\n elif value(v.lb) == 0:\n self.positive.append(var)\n else:\n self.reals.append(var)\n\n def __iter__(self):\n \"\"\"Iterate over all variables.\n\n Yield a tuple containing the variables category and its name.\n \"\"\"\n for category in ['binary', 'ints', 'positive', 'reals']:\n var_list = getattr(self, category)\n for var_name in var_list:\n yield category, var_name\n\n\nclass StorageTreeChecker(object):\n def __init__(self, model):\n # blocks are hashable so we can use a normal set\n self.tree = {model}\n self.model = model\n # add everything above the model\n pb = self.parent_block(model)\n while pb is not None:\n self.tree.add(pb)\n pb = self.parent_block(pb)\n\n def __call__(self, comp, exception_flag=True):\n if comp is self.model:\n return True\n\n # walk up tree until there are no more parents\n seen = set()\n pb = self.parent_block(comp)\n while pb is not None:\n if pb in self.tree:\n self.tree.update(seen)\n return True\n seen.add(pb)\n pb = self.parent_block(pb)\n\n if exception_flag:\n self.raise_error(comp)\n else:\n return False\n\n def parent_block(self, comp):\n if isinstance(comp, ICategorizedObject):\n parent = comp.parent\n while (parent is not None) and \\\n (not parent._is_heterogeneous_container):\n parent = parent.parent\n return parent\n else:\n return comp.parent_block()\n\n def raise_error(self, comp):\n raise RuntimeError(\n \"GAMS writer: found component '%s' not on same model tree.\\n\"\n \"All components must have the same parent model.\" % comp.name)\n\n\ndef split_long_line(line):\n \"\"\"\n GAMS has an 80,000 character limit for lines, so split as many\n times as needed so as to not have illegal lines.\n \"\"\"\n new_lines = ''\n while len(line) > 80000:\n i = 80000\n while line[i] != ' ':\n # Walk backwards to find closest space,\n # where it is safe to split to a new line\n if i < 0:\n raise RuntimeError(\n \"Found an 80,000+ character string with no spaces\")\n i -= 1\n new_lines += line[:i] + '\\n'\n # the space will be the 
first character in the next line,\n # so that the line doesn't start with the comment character '*'\n line = line[i:]\n new_lines += line\n return new_lines\n\n\ndef _get_bound(exp):\n if exp is None:\n return None\n if is_fixed(exp):\n return value(exp)\n raise ValueError(\"non-fixed bound or weight: \" + str(exp))\n\n\[email protected]('gams', 'Generate the corresponding GAMS file')\nclass ProblemWriter_gams(AbstractProblemWriter):\n\n def __init__(self):\n AbstractProblemWriter.__init__(self, ProblemFormat.gams)\n\n def __call__(self,\n model,\n output_filename,\n solver_capability,\n io_options):\n \"\"\"\n Write a model in the GAMS modeling language format.\n\n Keyword Arguments\n -----------------\n output_filename: str\n Name of file to write GAMS model to. Optionally pass a file-like\n stream and the model will be written to that instead.\n io_options: dict\n - warmstart=True\n Warmstart by initializing model's variables to their values.\n - symbolic_solver_labels=False\n Use full Pyomo component names rather than\n shortened symbols (slower, but useful for debugging).\n - labeler=None\n Custom labeler. Incompatible with symbolic_solver_labels.\n - solver=None\n If None, GAMS will use default solver for model type.\n - mtype=None\n Model type. If None, will chose from lp, nlp, mip, and minlp.\n - add_options=None\n List of additional lines to write directly\n into model file before the solve statement.\n For model attributes, <model name> is GAMS_MODEL.\n - skip_trivial_constraints=False\n Skip writing constraints whose body section is fixed.\n - file_determinism=1\n | How much effort do we want to put into ensuring the\n | GAMS file is written deterministically for a Pyomo model:\n | 0 : None\n | 1 : sort keys of indexed components (default)\n | 2 : sort keys AND sort names (over declaration order)\n - put_results=None\n Filename for optionally writing solution values and\n marginals to (put_results).dat, and solver statuses\n to (put_results + 'stat').dat.\n \"\"\"\n\n # Make sure not to modify the user's dictionary,\n # they may be reusing it outside of this call\n io_options = dict(io_options)\n\n # Use full Pyomo component names rather than\n # shortened symbols (slower, but useful for debugging).\n symbolic_solver_labels = io_options.pop(\"symbolic_solver_labels\", False)\n\n # Custom labeler option. 
Incompatible with symbolic_solver_labels.\n labeler = io_options.pop(\"labeler\", None)\n\n # If None, GAMS will use default solver for model type.\n solver = io_options.pop(\"solver\", None)\n\n # If None, will chose from lp, nlp, mip, and minlp.\n mtype = io_options.pop(\"mtype\", None)\n\n # Lines to add before solve statement.\n add_options = io_options.pop(\"add_options\", None)\n\n # Skip writing constraints whose body section is\n # fixed (i.e., no variables)\n skip_trivial_constraints = \\\n io_options.pop(\"skip_trivial_constraints\", False)\n\n # How much effort do we want to put into ensuring the\n # GAMS file is written deterministically for a Pyomo model:\n # 0 : None\n # 1 : sort keys of indexed components (default)\n # 2 : sort keys AND sort names (over declaration order)\n file_determinism = io_options.pop(\"file_determinism\", 1)\n sorter_map = {0:SortComponents.unsorted,\n 1:SortComponents.deterministic,\n 2:SortComponents.sortBoth}\n sort = sorter_map[file_determinism]\n\n # Warmstart by initializing model's variables to their values.\n warmstart = io_options.pop(\"warmstart\", True)\n\n # Filename for optionally writing solution values and marginals\n # Set to True by GAMSSolver\n put_results = io_options.pop(\"put_results\", None)\n\n if len(io_options):\n raise ValueError(\n \"GAMS writer passed unrecognized io_options:\\n\\t\" +\n \"\\n\\t\".join(\"%s = %s\"\n % (k,v) for k,v in iteritems(io_options)))\n\n if solver is not None and solver.upper() not in valid_solvers:\n raise ValueError(\n \"GAMS writer passed unrecognized solver: %s\" % solver)\n\n if mtype is not None:\n valid_mtypes = set([\n 'lp', 'qcp', 'nlp', 'dnlp', 'rmip', 'mip', 'rmiqcp', 'rminlp',\n 'miqcp', 'minlp', 'rmpec', 'mpec', 'mcp', 'cns', 'emp'])\n if mtype.lower() not in valid_mtypes:\n raise ValueError(\"GAMS writer passed unrecognized \"\n \"model type: %s\" % mtype)\n if (solver is not None and\n mtype.upper() not in valid_solvers[solver.upper()]):\n raise ValueError(\"GAMS writer passed solver (%s) \"\n \"unsuitable for given model type (%s)\"\n % (solver, mtype))\n\n if output_filename is None:\n output_filename = model.name + \".gms\"\n\n if symbolic_solver_labels and (labeler is not None):\n raise ValueError(\"GAMS writer: Using both the \"\n \"'symbolic_solver_labels' and 'labeler' \"\n \"I/O options is forbidden\")\n\n if symbolic_solver_labels:\n var_labeler = con_labeler = ShortNameLabeler(63, '_')\n elif labeler is None:\n var_labeler = NumericLabeler('x')\n con_labeler = NumericLabeler('c')\n else:\n var_labeler = con_labeler = labeler\n\n var_list = []\n\n def var_recorder(obj):\n ans = var_labeler(obj)\n try:\n if obj.is_variable_type():\n var_list.append(ans)\n except:\n pass\n return ans\n\n def var_label(obj):\n #if obj.is_fixed():\n # return str(value(obj))\n return symbolMap.getSymbol(obj, var_recorder)\n\n symbolMap = SymbolMap(var_label)\n\n # when sorting, there are a non-trivial number of\n # temporary objects created. 
these all yield\n # non-circular references, so disable GC - the\n # overhead is non-trivial, and because references\n # are non-circular, everything will be collected\n # immediately anyway.\n with PauseGC() as pgc:\n try:\n if isinstance(output_filename, string_types):\n output_file = open(output_filename, \"w\")\n else:\n # Support passing of stream such as a StringIO\n # on which to write the model file\n output_file = output_filename\n self._write_model(\n model=model,\n output_file=output_file,\n solver_capability=solver_capability,\n var_list=var_list,\n var_label=var_label,\n symbolMap=symbolMap,\n con_labeler=con_labeler,\n sort=sort,\n skip_trivial_constraints=skip_trivial_constraints,\n warmstart=warmstart,\n solver=solver,\n mtype=mtype,\n add_options=add_options,\n put_results=put_results\n )\n finally:\n if isinstance(output_filename, string_types):\n output_file.close()\n\n return output_filename, symbolMap\n\n def _write_model(self,\n model,\n output_file,\n solver_capability,\n var_list,\n var_label,\n symbolMap,\n con_labeler,\n sort,\n skip_trivial_constraints,\n warmstart,\n solver,\n mtype,\n add_options,\n put_results):\n constraint_names = []\n ConstraintIO = StringIO()\n linear = True\n linear_degree = set([0,1])\n\n # Make sure there are no strange ActiveComponents. The expression\n # walker will handle strange things in constraints later.\n model_ctypes = model.collect_ctypes(active=True)\n invalids = set()\n for t in (model_ctypes - valid_active_ctypes_minlp):\n if issubclass(t, ActiveComponent):\n invalids.add(t)\n if len(invalids):\n invalids = [t.__name__ for t in invalids]\n raise RuntimeError(\n \"Unallowable active component(s) %s.\\nThe GAMS writer cannot \"\n \"export models with this component type.\" %\n \", \".join(invalids))\n\n tc = StorageTreeChecker(model)\n\n # Walk through the model and generate the constraint definition\n # for all active constraints. Any Vars / Expressions that are\n # encountered will be added to the var_list due to the labeler\n # defined above.\n for con in model.component_data_objects(Constraint,\n active=True,\n sort=sort):\n\n if not con.has_lb() and not con.has_ub():\n assert not con.equality\n continue # non-binding, so skip\n\n con_body = as_numeric(con.body)\n if skip_trivial_constraints and con_body.is_fixed():\n continue\n if linear:\n if con_body.polynomial_degree() not in linear_degree:\n linear = False\n\n cName = symbolMap.getSymbol(con, con_labeler)\n if con.equality:\n constraint_names.append('%s' % cName)\n ConstraintIO.write('%s.. %s =e= %s ;\\n' % (\n constraint_names[-1],\n expression_to_string(con_body, tc, smap=symbolMap),\n _get_bound(con.upper)\n ))\n else:\n if con.has_lb():\n constraint_names.append('%s_lo' % cName)\n ConstraintIO.write('%s.. %s =l= %s ;\\n' % (\n constraint_names[-1],\n _get_bound(con.lower),\n expression_to_string(con_body, tc, smap=symbolMap)\n ))\n if con.has_ub():\n constraint_names.append('%s_hi' % cName)\n ConstraintIO.write('%s.. %s =l= %s ;\\n' % (\n constraint_names[-1],\n expression_to_string(con_body, tc, smap=symbolMap),\n _get_bound(con.upper)\n ))\n\n obj = list(model.component_data_objects(Objective,\n active=True,\n sort=sort))\n if len(obj) != 1:\n raise RuntimeError(\n \"GAMS writer requires exactly one active objective (found %s)\"\n % (len(obj)))\n obj = obj[0]\n if linear:\n if obj.expr.polynomial_degree() not in linear_degree:\n linear = False\n oName = symbolMap.getSymbol(obj, con_labeler)\n constraint_names.append(oName)\n ConstraintIO.write('%s.. 
GAMS_OBJECTIVE =e= %s ;\\n' % (\n oName,\n expression_to_string(obj.expr, tc, smap=symbolMap)\n ))\n\n # Categorize the variables that we found\n categorized_vars = Categorizer(var_list, symbolMap)\n\n # Write the GAMS model\n # $offdigit ignores extra precise digits instead of erroring\n output_file.write(\"$offdigit\\n\\n\")\n output_file.write(\"EQUATIONS\\n\\t\")\n output_file.write(\"\\n\\t\".join(constraint_names))\n if categorized_vars.binary:\n output_file.write(\";\\n\\nBINARY VARIABLES\\n\\t\")\n output_file.write(\"\\n\\t\".join(categorized_vars.binary))\n if categorized_vars.ints:\n output_file.write(\";\\n\\nINTEGER VARIABLES\")\n output_file.write(\"\\n\\t\")\n output_file.write(\"\\n\\t\".join(categorized_vars.ints))\n if categorized_vars.positive:\n output_file.write(\";\\n\\nPOSITIVE VARIABLES\\n\\t\")\n output_file.write(\"\\n\\t\".join(categorized_vars.positive))\n output_file.write(\";\\n\\nVARIABLES\\n\\tGAMS_OBJECTIVE\\n\\t\")\n output_file.write(\"\\n\\t\".join(categorized_vars.reals))\n output_file.write(\";\\n\\n\")\n\n for line in ConstraintIO.getvalue().splitlines():\n if len(line) > 80000:\n line = split_long_line(line)\n output_file.write(line + \"\\n\")\n\n output_file.write(\"\\n\")\n\n warn_int_bounds = False\n for category, var_name in categorized_vars:\n var = symbolMap.getObject(var_name)\n tc(var)\n if category == 'positive':\n if var.has_ub():\n output_file.write(\"%s.up = %s;\\n\" %\n (var_name, _get_bound(var.ub)))\n elif category == 'ints':\n if not var.has_lb():\n warn_int_bounds = True\n # GAMS doesn't allow -INF lower bound for ints\n logger.warning(\"Lower bound for integer variable %s set \"\n \"to -1.0E+100.\" % var.name)\n output_file.write(\"%s.lo = -1.0E+100;\\n\" % (var_name))\n elif value(var.lb) != 0:\n output_file.write(\"%s.lo = %s;\\n\" %\n (var_name, _get_bound(var.lb)))\n if not var.has_ub():\n warn_int_bounds = True\n # GAMS has an option value called IntVarUp that is the\n # default upper integer bound, which it applies if the\n # integer's upper bound is INF. This option maxes out at\n # 2147483647, so we can go higher by setting the bound.\n logger.warning(\"Upper bound for integer variable %s set \"\n \"to +1.0E+100.\" % var.name)\n output_file.write(\"%s.up = +1.0E+100;\\n\" % (var_name))\n else:\n output_file.write(\"%s.up = %s;\\n\" %\n (var_name, _get_bound(var.ub)))\n elif category == 'binary':\n if var.has_lb() and value(var.lb) != 0:\n output_file.write(\"%s.lo = %s;\\n\" %\n (var_name, _get_bound(var.lb)))\n if var.has_ub() and value(var.ub) != 1:\n output_file.write(\"%s.up = %s;\\n\" %\n (var_name, _get_bound(var.ub)))\n elif category == 'reals':\n if var.has_lb():\n output_file.write(\"%s.lo = %s;\\n\" %\n (var_name, _get_bound(var.lb)))\n if var.has_ub():\n output_file.write(\"%s.up = %s;\\n\" %\n (var_name, _get_bound(var.ub)))\n else:\n raise KeyError('Category %s not supported' % category)\n if warmstart and var.value is not None:\n output_file.write(\"%s.l = %s;\\n\" % (var_name, var.value))\n\n if warn_int_bounds:\n logger.warning(\n \"GAMS requires finite bounds for integer variables. 1.0E100 \"\n \"is as extreme as GAMS will define, and should be enough to \"\n \"appear unbounded. 
If the solver cannot handle this bound, \"\n \"explicitly set a smaller bound on the pyomo model, or try a \"\n \"different GAMS solver.\")\n\n model_name = \"GAMS_MODEL\"\n output_file.write(\"\\nMODEL %s /all/ ;\\n\" % model_name)\n\n if mtype is None:\n mtype = ('lp','nlp','mip','minlp')[\n (0 if linear else 1) +\n (2 if (categorized_vars.binary or categorized_vars.ints)\n else 0)]\n\n if solver is not None:\n if mtype.upper() not in valid_solvers[solver.upper()]:\n raise ValueError(\"GAMS writer passed solver (%s) \"\n \"unsuitable for model type (%s)\"\n % (solver, mtype))\n output_file.write(\"option %s=%s;\\n\" % (mtype, solver))\n\n if add_options is not None:\n output_file.write(\"\\n* START USER ADDITIONAL OPTIONS\\n\")\n for line in add_options:\n output_file.write('\\n' + line)\n output_file.write(\"\\n\\n* END USER ADDITIONAL OPTIONS\\n\\n\")\n\n output_file.write(\n \"SOLVE %s USING %s %simizing GAMS_OBJECTIVE;\\n\\n\"\n % ( model_name,\n mtype,\n 'min' if obj.sense == minimize else 'max'))\n\n # Set variables to store certain statuses and attributes\n stat_vars = ['MODELSTAT', 'SOLVESTAT', 'OBJEST', 'OBJVAL', 'NUMVAR',\n 'NUMEQU', 'NUMDVAR', 'NUMNZ', 'ETSOLVE']\n output_file.write(\"Scalars MODELSTAT 'model status', \"\n \"SOLVESTAT 'solve status';\\n\")\n output_file.write(\"MODELSTAT = %s.modelstat;\\n\" % model_name)\n output_file.write(\"SOLVESTAT = %s.solvestat;\\n\\n\" % model_name)\n\n output_file.write(\"Scalar OBJEST 'best objective', \"\n \"OBJVAL 'objective value';\\n\")\n output_file.write(\"OBJEST = %s.objest;\\n\" % model_name)\n output_file.write(\"OBJVAL = %s.objval;\\n\\n\" % model_name)\n\n output_file.write(\"Scalar NUMVAR 'number of variables';\\n\")\n output_file.write(\"NUMVAR = %s.numvar\\n\\n\" % model_name)\n\n output_file.write(\"Scalar NUMEQU 'number of equations';\\n\")\n output_file.write(\"NUMEQU = %s.numequ\\n\\n\" % model_name)\n\n output_file.write(\"Scalar NUMDVAR 'number of discrete variables';\\n\")\n output_file.write(\"NUMDVAR = %s.numdvar\\n\\n\" % model_name)\n\n output_file.write(\"Scalar NUMNZ 'number of nonzeros';\\n\")\n output_file.write(\"NUMNZ = %s.numnz\\n\\n\" % model_name)\n\n output_file.write(\"Scalar ETSOLVE 'time to execute solve statement';\\n\")\n output_file.write(\"ETSOLVE = %s.etsolve\\n\\n\" % model_name)\n\n if put_results is not None:\n results = put_results + '.dat'\n output_file.write(\"\\nfile results /'%s'/;\" % results)\n output_file.write(\"\\nresults.nd=15;\")\n output_file.write(\"\\nresults.nw=21;\")\n output_file.write(\"\\nput results;\")\n output_file.write(\"\\nput 'SYMBOL : LEVEL : MARGINAL' /;\")\n for var in var_list:\n output_file.write(\"\\nput %s %s.l %s.m /;\" % (var, var, var))\n for con in constraint_names:\n output_file.write(\"\\nput %s %s.l %s.m /;\" % (con, con, con))\n output_file.write(\"\\nput GAMS_OBJECTIVE GAMS_OBJECTIVE.l \"\n \"GAMS_OBJECTIVE.m;\\n\")\n\n statresults = put_results + 'stat.dat'\n output_file.write(\"\\nfile statresults /'%s'/;\" % statresults)\n output_file.write(\"\\nstatresults.nd=15;\")\n output_file.write(\"\\nstatresults.nw=21;\")\n output_file.write(\"\\nput statresults;\")\n output_file.write(\"\\nput 'SYMBOL : VALUE' /;\")\n for stat in stat_vars:\n output_file.write(\"\\nput '%s' %s /;\\n\" % (stat, stat))\n\n\nvalid_solvers = {\n'ALPHAECP': {'MINLP','MIQCP'},\n'AMPL': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP'},\n'ANTIGONE': {'NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'BARON': 
{'LP','MIP','RMIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'BDMLP': {'LP','MIP','RMIP'},\n'BDMLPD': {'LP','RMIP'},\n'BENCH': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'BONMIN': {'MINLP','MIQCP'},\n'BONMINH': {'MINLP','MIQCP'},\n'CBC': {'LP','MIP','RMIP'},\n'COINBONMIN': {'MINLP','MIQCP'},\n'COINCBC': {'LP','MIP','RMIP'},\n'COINCOUENNE': {'NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'COINIPOPT': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'COINOS': {'LP','MIP','RMIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'COINSCIP': {'MIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'CONOPT': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'CONOPT3': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'CONOPT4': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'CONOPTD': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'CONVERT': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'CONVERTD': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP','EMP'},\n'COUENNE': {'NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'CPLEX': {'LP','MIP','RMIP','QCP','MIQCP','RMIQCP'},\n'CPLEXD': {'LP','MIP','RMIP','QCP','MIQCP','RMIQCP'},\n'CPOPTIMIZER': {'MIP','MINLP','MIQCP'},\n'DE': {'EMP'},\n'DECIS': {'EMP'},\n'DECISC': {'LP'},\n'DECISM': {'LP'},\n'DICOPT': {'MINLP','MIQCP'},\n'DICOPTD': {'MINLP','MIQCP'},\n'EXAMINER': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'EXAMINER2': {'LP','MIP','RMIP','NLP','MCP','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'GAMSCHK': {'LP','MIP','RMIP','NLP','MCP','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'GLOMIQO': {'QCP','MIQCP','RMIQCP'},\n'GUROBI': {'LP','MIP','RMIP','QCP','MIQCP','RMIQCP'},\n'GUSS': {'LP', 'MIP', 'NLP', 'MCP', 'CNS', 'DNLP', 'MINLP', 'QCP', 'MIQCP'},\n'IPOPT': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'IPOPTH': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'JAMS': {'EMP'},\n'KESTREL': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP','EMP'},\n'KNITRO': {'LP','RMIP','NLP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'LGO': {'LP','RMIP','NLP','DNLP','RMINLP','QCP','RMIQCP'},\n'LGOD': {'LP','RMIP','NLP','DNLP','RMINLP','QCP','RMIQCP'},\n'LINDO': {'LP','MIP','RMIP','NLP','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP','EMP'},\n'LINDOGLOBAL': {'LP','MIP','RMIP','NLP','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'LINGO': {'LP','MIP','RMIP','NLP','DNLP','RMINLP','MINLP'},\n'LOCALSOLVER': {'MIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'LOGMIP': {'EMP'},\n'LS': {'LP','RMIP'},\n'MILES': {'MCP'},\n'MILESE': {'MCP'},\n'MINOS': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'MINOS5': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'MINOS55': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'MOSEK': {'LP','MIP','RMIP','NLP','DNLP','RMINLP','QCP','MIQCP','RMIQCP'},\n'MPECDUMP': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP'},\n'MPSGE': {},\n'MSNLP': {'NLP','DNLP','RMINLP','QCP','RMIQCP'},\n'NLPEC': {'MCP','MPEC','RMPEC'},\n'OQNLP': {'NLP', 'DNLP', 'MINLP', 'QCP', 'MIQCP'},\n'OS': 
{'LP','MIP','RMIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'OSICPLEX': {'LP','MIP','RMIP'},\n'OSIGUROBI': {'LP','MIP','RMIP'},\n'OSIMOSEK': {'LP','MIP','RMIP'},\n'OSISOPLEX': {'LP','RMIP'},\n'OSIXPRESS': {'LP','MIP','RMIP'},\n'PATH': {'MCP','CNS'},\n'PATHC': {'MCP','CNS'},\n'PATHNLP': {'LP','RMIP','NLP','DNLP','RMINLP','QCP','RMIQCP'},\n'PYOMO': {'LP','MIP','RMIP','NLP','MCP','MPEC','RMPEC','CNS','DNLP','RMINLP','MINLP'},\n'QUADMINOS': {'LP'},\n'SBB': {'MINLP','MIQCP'},\n'SCENSOLVER': {'LP','MIP','RMIP','NLP','MCP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'SCIP': {'MIP','NLP','CNS','DNLP','RMINLP','MINLP','QCP','MIQCP','RMIQCP'},\n'SNOPT': {'LP','RMIP','NLP','CNS','DNLP','RMINLP','QCP','RMIQCP'},\n'SOPLEX': {'LP','RMIP'},\n'XA': {'LP','MIP','RMIP'},\n'XPRESS': {'LP','MIP','RMIP','QCP','MIQCP','RMIQCP'}\n}\n",
"path": "pyomo/repn/plugins/gams_writer.py"
}
] | diff --git a/pyomo/repn/plugins/gams_writer.py b/pyomo/repn/plugins/gams_writer.py
index a8b579ef6ec..8bf28a45186 100644
--- a/pyomo/repn/plugins/gams_writer.py
+++ b/pyomo/repn/plugins/gams_writer.py
@@ -244,7 +244,9 @@ def split_long_line(line):
"Found an 80,000+ character string with no spaces")
i -= 1
new_lines += line[:i] + '\n'
- line = line[i + 1:]
+ # the space will be the first character in the next line,
+ # so that the line doesn't start with the comment character '*'
+ line = line[i:]
new_lines += line
return new_lines
diff --git a/pyomo/repn/tests/gams/test_gams.py b/pyomo/repn/tests/gams/test_gams.py
index fb581840b13..4c5c8f2c9d9 100644
--- a/pyomo/repn/tests/gams/test_gams.py
+++ b/pyomo/repn/tests/gams/test_gams.py
@@ -227,10 +227,16 @@ def test_split_long_line(self):
pat = "var1 + log(var2 / 9) - "
line = (pat * 10000) + "x"
self.assertEqual(split_long_line(line),
- pat * 3478 + "var1 +\nlog(var2 / 9) - " +
- pat * 3477 + "var1 +\nlog(var2 / 9) - " +
+ pat * 3478 + "var1 +\n log(var2 / 9) - " +
+ pat * 3477 + "var1 +\n log(var2 / 9) - " +
pat * 3043 + "x")
+ def test_split_long_line_no_comment(self):
+ pat = "1000 * 2000 * "
+ line = pat * 5715 + "x"
+ self.assertEqual(split_long_line(line),
+ pat * 5714 + "1000\n * 2000 * x")
+
def test_solver_arg(self):
m = ConcreteModel()
m.x = Var()
 | GAMS writer: splitting lines longer than 80,000 characters
 If a line is longer than 80,000 characters, it is split at the last space within the first 80,000 characters (function 'split_long_line' of 'gams_writer.py'). This mostly works, but it sometimes leads to an error when the space is immediately followed by a '*' (multiply symbol), because a continuation line that starts with '*' is read by GAMS as a comment.
|
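For the Pyomo record above, the patched helper is restated below on its own, with explanatory comments added for readability; it is essentially the `split_long_line` shown in the diff, not a new implementation.

    def split_long_line(line):
        """GAMS limits lines to 80,000 characters, so split at the last space
        before the limit, keeping that space at the START of the continuation
        line so the new line can never begin with '*', which GAMS reads as a
        comment."""
        new_lines = ''
        while len(line) > 80000:
            i = 80000
            while line[i] != ' ':
                # walk backwards to the closest space, where a split is safe
                if i < 0:
                    raise RuntimeError(
                        "Found an 80,000+ character string with no spaces")
                i -= 1
            new_lines += line[:i] + '\n'
            line = line[i:]  # the old code used line[i + 1:] and dropped the space
        return new_lines + line

With the old `line[i + 1:]` slice, an expression such as `1000 * 2000 * ...` could leave the '*' as the first character of the continuation line, which GAMS then parses as a comment; the new `test_split_long_line_no_comment` case in the diff exercises exactly that situation.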
fossasia__open-event-server-6182 | [
{
"content": "import random\nfrom datetime import datetime\n\nimport humanize\nimport pytz\nfrom flask import url_for\nfrom flask_scrypt import generate_password_hash, generate_random_salt\nfrom sqlalchemy import event, desc\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy.sql import func\nfrom sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound\n\nfrom app.api.helpers.db import get_count\nfrom app.models import db\nfrom app.models.base import SoftDeletionModel\nfrom app.models.custom_system_role import UserSystemRole, CustomSysRole\nfrom app.models.helpers.versioning import clean_up_string, clean_html\nfrom app.models.notification import Notification\nfrom app.models.panel_permission import PanelPermission\nfrom app.models.permission import Permission\nfrom app.models.role import Role\nfrom app.models.service import Service\nfrom app.models.session import Session\nfrom app.models.speaker import Speaker\nfrom app.models.user_permission import UserPermission\nfrom app.models.users_events_role import UsersEventsRoles as UER\n\n# System-wide\nADMIN = 'admin'\nSUPERADMIN = 'super_admin'\n\nMARKETER = 'Marketer'\nSALES_ADMIN = 'Sales Admin'\n\nSYS_ROLES_LIST = [\n ADMIN,\n SUPERADMIN,\n]\n\n# Event-specific\nOWNER = 'owner'\nORGANIZER = 'organizer'\nCOORGANIZER = 'coorganizer'\nTRACK_ORGANIZER = 'track_organizer'\nMODERATOR = 'moderator'\nATTENDEE = 'attendee'\nREGISTRAR = 'registrar'\n\n\nclass User(SoftDeletionModel):\n \"\"\"User model class\"\"\"\n __tablename__ = 'users'\n\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n _email = db.Column(db.String(120), unique=True, nullable=False)\n _password = db.Column(db.String(128), nullable=False)\n facebook_id = db.Column(db.BigInteger, unique=True, nullable=True, name='facebook_id')\n facebook_login_hash = db.Column(db.String, nullable=True)\n reset_password = db.Column(db.String(128))\n salt = db.Column(db.String(128))\n avatar_url = db.Column(db.String)\n tokens = db.Column(db.Text)\n first_name = db.Column(db.String, nullable=True)\n last_name = db.Column(db.String, nullable=True)\n details = db.Column(db.String)\n contact = db.Column(db.String)\n facebook_url = db.Column(db.String)\n twitter_url = db.Column(db.String)\n instagram_url = db.Column(db.String)\n google_plus_url = db.Column(db.String)\n original_image_url = db.Column(db.String, nullable=True, default=None)\n thumbnail_image_url = db.Column(db.String)\n small_image_url = db.Column(db.String)\n icon_image_url = db.Column(db.String)\n is_super_admin = db.Column(db.Boolean, default=False)\n is_admin = db.Column(db.Boolean, default=False)\n is_sales_admin = db.Column(db.Boolean, default=False)\n is_marketer = db.Column(db.Boolean, default=False)\n is_verified = db.Column(db.Boolean, default=False)\n was_registered_with_order = db.Column(db.Boolean, default=False)\n last_accessed_at = db.Column(db.DateTime(timezone=True))\n created_at = db.Column(db.DateTime(timezone=True), default=func.now())\n # Event Invoice Details\n billing_contact_name = db.Column(db.String)\n billing_phone = db.Column(db.String)\n billing_state = db.Column(db.String)\n billing_country = db.Column(db.String)\n billing_tax_info = db.Column(db.String)\n company = db.Column(db.String)\n billing_address = db.Column(db.String)\n billing_city = db.Column(db.String)\n billing_zip_code = db.Column(db.String)\n billing_additional_info = db.Column(db.String)\n\n # relationships\n speaker = db.relationship('Speaker', backref=\"user\")\n favourite_events = 
db.relationship('UserFavouriteEvent', backref=\"user\")\n session = db.relationship('Session', backref=\"user\")\n feedback = db.relationship('Feedback', backref=\"user\")\n access_codes = db.relationship('AccessCode', backref=\"user\")\n discount_codes = db.relationship('DiscountCode', backref=\"user\")\n marketer_events = db.relationship(\n 'Event',\n viewonly=True,\n secondary='join(UserSystemRole, CustomSysRole,'\n ' and_(CustomSysRole.id == UserSystemRole.role_id, CustomSysRole.name == \"Marketer\"))',\n primaryjoin='UserSystemRole.user_id == User.id',\n secondaryjoin='Event.id == UserSystemRole.event_id'\n )\n sales_admin_events = db.relationship(\n 'Event',\n viewonly=True,\n secondary='join(UserSystemRole, CustomSysRole,'\n ' and_(CustomSysRole.id == UserSystemRole.role_id, CustomSysRole.name == \"Sales Admin\"))',\n primaryjoin='UserSystemRole.user_id == User.id',\n secondaryjoin='Event.id == UserSystemRole.event_id')\n\n @hybrid_property\n def password(self):\n \"\"\"\n Hybrid property for password\n :return:\n \"\"\"\n return self._password\n\n @password.setter\n def password(self, password):\n \"\"\"\n Setter for _password, saves hashed password, salt and reset_password string\n :param password:\n :return:\n \"\"\"\n salt = str(generate_random_salt(), 'utf-8')\n self._password = str(generate_password_hash(password, salt), 'utf-8')\n hash_ = random.getrandbits(128)\n self.reset_password = str(hash_)\n self.salt = salt\n\n @hybrid_property\n def email(self):\n \"\"\"\n Hybrid property for email\n :return:\n \"\"\"\n return self._email\n\n @email.setter\n def email(self, email):\n \"\"\"\n Setter for _email,\n set user to 'not verified' if email is updated\n :param email:\n :return:\n \"\"\"\n if self._email != email:\n self._email = email\n self.is_verified = False\n\n # User Permissions\n def can_publish_event(self):\n \"\"\"\n Checks if User can publish an event\n \"\"\"\n perm = UserPermission.query.filter_by(name='publish_event').first()\n if not perm:\n return self.is_verified\n\n if self.is_verified is False:\n return perm.unverified_user\n\n return True\n\n def can_create_event(self):\n \"\"\"\n Checks if User can create an event\n \"\"\"\n perm = UserPermission.query.filter_by(name='create_event').first()\n if not perm:\n return self.is_verified\n\n if self.is_verified is False:\n return perm.unverified_user\n\n return True\n\n def has_role(self, event_id):\n \"\"\"\n Checks if user has any of the Roles at an Event.\n Exclude Attendee Role.\n \"\"\"\n attendee_role = Role.query.filter_by(name=ATTENDEE).first()\n uer = UER.query.filter(UER.user == self, UER.event_id == event_id,\n UER.role != attendee_role).first()\n if uer is None:\n return False\n else:\n return True\n\n def _is_role(self, role_name, event_id=None):\n \"\"\"\n Checks if a user has a particular Role at an Event.\n \"\"\"\n role = Role.query.filter_by(name=role_name).first()\n if event_id:\n uer = UER.query.filter_by(user=self,\n event_id=event_id,\n role=role).first()\n else:\n uer = UER.query.filter_by(user=self,\n role=role).first()\n if not uer:\n return False\n else:\n return True\n\n def is_owner(self, event_id):\n return self._is_role(OWNER, event_id)\n\n def is_organizer(self, event_id):\n # type: (object) -> object\n return self._is_role(ORGANIZER, event_id)\n\n def is_coorganizer(self, event_id):\n return self._is_role(COORGANIZER, event_id)\n\n def is_track_organizer(self, event_id):\n return self._is_role(TRACK_ORGANIZER, event_id)\n\n def is_moderator(self, event_id):\n return 
self._is_role(MODERATOR, event_id)\n\n def is_registrar(self, event_id):\n return self._is_role(REGISTRAR, event_id)\n\n def is_attendee(self, event_id):\n return self._is_role(ATTENDEE, event_id)\n\n def has_event_access(self, event_id):\n return self._is_role(OWNER, event_id) or self._is_role(ORGANIZER, event_id) or \\\n self._is_role(COORGANIZER, event_id)\n\n @hybrid_property\n def is_user_owner(self):\n return self._is_role(OWNER)\n\n @hybrid_property\n def is_user_organizer(self):\n # type: (object) -> object\n return self._is_role(ORGANIZER)\n\n @hybrid_property\n def is_user_coorganizer(self):\n return self._is_role(COORGANIZER)\n\n @hybrid_property\n def is_user_track_organizer(self):\n return self._is_role(TRACK_ORGANIZER)\n\n @hybrid_property\n def is_user_moderator(self):\n return self._is_role(MODERATOR)\n\n @hybrid_property\n def is_user_registrar(self):\n return self._is_role(REGISTRAR)\n\n @hybrid_property\n def is_user_attendee(self):\n return self._is_role(ATTENDEE)\n\n def _has_perm(self, operation, service_class, event_id):\n # Operation names and their corresponding permission in `Permissions`\n operations = {\n 'create': 'can_create',\n 'read': 'can_read',\n 'update': 'can_update',\n 'delete': 'can_delete',\n }\n if operation not in list(operations.keys()):\n raise ValueError('No such operation defined')\n\n try:\n service_name = service_class.get_service_name()\n except AttributeError:\n # If `service_class` does not have `get_service_name()`\n return False\n\n if self.is_super_admin:\n return True\n\n service = Service.query.filter_by(name=service_name).first()\n\n uer_querylist = UER.query.filter_by(user=self,\n event_id=event_id)\n for uer in uer_querylist:\n role = uer.role\n perm = Permission.query.filter_by(role=role,\n service=service).first()\n if getattr(perm, operations[operation]):\n return True\n\n return False\n\n def can_create(self, service_class, event_id):\n return self._has_perm('create', service_class, event_id)\n\n def can_read(self, service_class, event_id):\n return self._has_perm('read', service_class, event_id)\n\n def can_update(self, service_class, event_id):\n return self._has_perm('update', service_class, event_id)\n\n def can_delete(self, service_class, event_id):\n return self._has_perm('delete', service_class, event_id)\n\n def is_speaker_at_session(self, session_id):\n try:\n session = Session.query.filter(Session.speakers.any(Speaker.user_id == self.id)).filter(\n Session.id == session_id).one()\n if session:\n return True\n else:\n return False\n except MultipleResultsFound:\n return False\n except NoResultFound:\n return False\n\n def is_speaker_at_event(self, event_id):\n try:\n session = Session.query.filter(Session.speakers.any(Speaker.user_id == self.id)).filter(\n Session.event_id == event_id).first()\n if session:\n return True\n else:\n return False\n except MultipleResultsFound:\n return False\n except NoResultFound:\n return False\n\n # Flask-Login integration\n def is_authenticated(self):\n return True\n\n def is_active(self):\n return True\n\n def is_anonymous(self):\n return False\n\n def get_id(self):\n return self.id\n\n def is_correct_password(self, password):\n salt = self.salt\n password = str(generate_password_hash(password, salt), 'utf-8')\n if password == self._password:\n return True\n return False\n\n @property\n def is_staff(self):\n return self.is_super_admin or self.is_admin\n\n def is_sys_role(self, role_id):\n \"\"\"\n Check if a user has a Custom System Role assigned.\n `role_id` is id of a 
`CustomSysRole` instance.\n \"\"\"\n role = UserSystemRole.query.filter_by(user=self, role_id=role_id).first()\n return bool(role)\n\n def first_access_panel(self):\n \"\"\"\n Check if the user is assigned a Custom Role or not\n This checks if there is an entry containing the current user in the `user_system_roles` table\n returns panel name if exists otherwise false\n \"\"\"\n custom_role = UserSystemRole.query.filter_by(user=self).first()\n if not custom_role:\n return False\n perm = PanelPermission.query.filter(PanelPermission.custom_system_roles.any(id=custom_role.role_id)).first()\n if not perm:\n return False\n return perm.panel_name\n\n def can_download_tickets(self, order):\n permissible_users = [holder.id for holder in order.ticket_holders] + [order.user.id]\n if self.is_staff or self.is_organizer(order.event.id) or self.id in permissible_users:\n return True\n return False\n\n def can_access_panel(self, panel_name):\n \"\"\"\n Check if user can access an Admin Panel\n \"\"\"\n if self.is_staff:\n return True\n\n custom_sys_roles = UserSystemRole.query.filter_by(user=self)\n for custom_role in custom_sys_roles:\n if custom_role.role.can_access(panel_name):\n return True\n\n return False\n\n def get_unread_notif_count(self):\n return get_count(Notification.query.filter_by(user=self, is_read=False))\n\n def get_unread_notifs(self):\n \"\"\"\n Get unread notifications with titles, humanized receiving time\n and Mark-as-read links.\n \"\"\"\n notifs = []\n unread_notifs = Notification.query.filter_by(user=self, is_read=False).order_by(\n desc(Notification.received_at))\n for notif in unread_notifs:\n notifs.append({\n 'title': notif.title,\n 'received_at': humanize.naturaltime(datetime.now(pytz.utc) - notif.received_at),\n 'mark_read': url_for('notifications.mark_as_read', notification_id=notif.id)\n })\n\n return notifs\n\n # update last access time\n def update_lat(self):\n self.last_accessed_at = datetime.now(pytz.utc)\n\n @property\n def fullname(self):\n firstname = self.first_name if self.first_name else ''\n lastname = self.last_name if self.last_name else ''\n if firstname and lastname:\n return '{} {}'.format(firstname, lastname)\n else:\n return ''\n\n def __repr__(self):\n return '<User %r>' % self.email\n\n def __str__(self):\n return self.__repr__()\n\n def __setattr__(self, name, value):\n if name == 'details':\n super(User, self).__setattr__(name, clean_html(clean_up_string(value)))\n else:\n super(User, self).__setattr__(name, value)\n\n\[email protected]_for(User, 'init')\ndef receive_init(target, args, kwargs):\n target.signup_at = datetime.now(pytz.utc)\n",
"path": "app/models/user.py"
}
] | [
{
"content": "import random\nfrom datetime import datetime\n\nimport humanize\nimport pytz\nfrom flask import url_for\nfrom flask_scrypt import generate_password_hash, generate_random_salt\nfrom sqlalchemy import event, desc\nfrom sqlalchemy.ext.hybrid import hybrid_property\nfrom sqlalchemy.sql import func\nfrom sqlalchemy.orm.exc import MultipleResultsFound, NoResultFound\n\nfrom app.api.helpers.db import get_count\nfrom app.models import db\nfrom app.models.base import SoftDeletionModel\nfrom app.models.custom_system_role import UserSystemRole, CustomSysRole\nfrom app.models.helpers.versioning import clean_up_string, clean_html\nfrom app.models.notification import Notification\nfrom app.models.panel_permission import PanelPermission\nfrom app.models.permission import Permission\nfrom app.models.role import Role\nfrom app.models.service import Service\nfrom app.models.session import Session\nfrom app.models.speaker import Speaker\nfrom app.models.user_permission import UserPermission\nfrom app.models.users_events_role import UsersEventsRoles as UER\n\n# System-wide\nADMIN = 'admin'\nSUPERADMIN = 'super_admin'\n\nMARKETER = 'Marketer'\nSALES_ADMIN = 'Sales Admin'\n\nSYS_ROLES_LIST = [\n ADMIN,\n SUPERADMIN,\n]\n\n# Event-specific\nOWNER = 'owner'\nORGANIZER = 'organizer'\nCOORGANIZER = 'coorganizer'\nTRACK_ORGANIZER = 'track_organizer'\nMODERATOR = 'moderator'\nATTENDEE = 'attendee'\nREGISTRAR = 'registrar'\n\n\nclass User(SoftDeletionModel):\n \"\"\"User model class\"\"\"\n __tablename__ = 'users'\n\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n _email = db.Column(db.String(120), unique=True, nullable=False)\n _password = db.Column(db.String(128), nullable=False)\n facebook_id = db.Column(db.BigInteger, unique=True, nullable=True, name='facebook_id')\n facebook_login_hash = db.Column(db.String, nullable=True)\n reset_password = db.Column(db.String(128))\n salt = db.Column(db.String(128))\n avatar_url = db.Column(db.String)\n tokens = db.Column(db.Text)\n first_name = db.Column(db.String, nullable=True)\n last_name = db.Column(db.String, nullable=True)\n details = db.Column(db.String)\n contact = db.Column(db.String)\n facebook_url = db.Column(db.String)\n twitter_url = db.Column(db.String)\n instagram_url = db.Column(db.String)\n google_plus_url = db.Column(db.String)\n original_image_url = db.Column(db.String, nullable=True, default=None)\n thumbnail_image_url = db.Column(db.String)\n small_image_url = db.Column(db.String)\n icon_image_url = db.Column(db.String)\n is_super_admin = db.Column(db.Boolean, default=False)\n is_admin = db.Column(db.Boolean, default=False)\n is_sales_admin = db.Column(db.Boolean, default=False)\n is_marketer = db.Column(db.Boolean, default=False)\n is_verified = db.Column(db.Boolean, default=False)\n was_registered_with_order = db.Column(db.Boolean, default=False)\n last_accessed_at = db.Column(db.DateTime(timezone=True))\n created_at = db.Column(db.DateTime(timezone=True), default=func.now())\n # Event Invoice Details\n billing_contact_name = db.Column(db.String)\n billing_phone = db.Column(db.String)\n billing_state = db.Column(db.String)\n billing_country = db.Column(db.String)\n billing_tax_info = db.Column(db.String)\n company = db.Column(db.String)\n billing_address = db.Column(db.String)\n billing_city = db.Column(db.String)\n billing_zip_code = db.Column(db.String)\n billing_additional_info = db.Column(db.String)\n\n # relationships\n speaker = db.relationship('Speaker', backref=\"user\")\n favourite_events = 
db.relationship('UserFavouriteEvent', backref=\"user\")\n session = db.relationship('Session', backref=\"user\")\n feedback = db.relationship('Feedback', backref=\"user\")\n access_codes = db.relationship('AccessCode', backref=\"user\")\n discount_codes = db.relationship('DiscountCode', backref=\"user\")\n marketer_events = db.relationship(\n 'Event',\n viewonly=True,\n secondary='join(UserSystemRole, CustomSysRole,'\n ' and_(CustomSysRole.id == UserSystemRole.role_id, CustomSysRole.name == \"Marketer\"))',\n primaryjoin='UserSystemRole.user_id == User.id',\n secondaryjoin='Event.id == UserSystemRole.event_id'\n )\n sales_admin_events = db.relationship(\n 'Event',\n viewonly=True,\n secondary='join(UserSystemRole, CustomSysRole,'\n ' and_(CustomSysRole.id == UserSystemRole.role_id, CustomSysRole.name == \"Sales Admin\"))',\n primaryjoin='UserSystemRole.user_id == User.id',\n secondaryjoin='Event.id == UserSystemRole.event_id')\n\n @hybrid_property\n def password(self):\n \"\"\"\n Hybrid property for password\n :return:\n \"\"\"\n return self._password\n\n @password.setter\n def password(self, password):\n \"\"\"\n Setter for _password, saves hashed password, salt and reset_password string\n :param password:\n :return:\n \"\"\"\n salt = str(generate_random_salt(), 'utf-8')\n self._password = str(generate_password_hash(password, salt), 'utf-8')\n hash_ = random.getrandbits(128)\n self.reset_password = str(hash_)\n self.salt = salt\n\n @hybrid_property\n def email(self):\n \"\"\"\n Hybrid property for email\n :return:\n \"\"\"\n return self._email\n\n @email.setter\n def email(self, email):\n \"\"\"\n Setter for _email,\n set user to 'not verified' if email is updated\n :param email:\n :return:\n \"\"\"\n if self._email != email:\n self._email = email\n self.is_verified = False\n\n # User Permissions\n def can_publish_event(self):\n \"\"\"\n Checks if User can publish an event\n \"\"\"\n perm = UserPermission.query.filter_by(name='publish_event').first()\n if not perm:\n return self.is_verified\n\n if self.is_verified is False:\n return perm.unverified_user\n\n return True\n\n def can_create_event(self):\n \"\"\"\n Checks if User can create an event\n \"\"\"\n perm = UserPermission.query.filter_by(name='create_event').first()\n if not perm:\n return self.is_verified\n\n if self.is_verified is False:\n return perm.unverified_user\n\n return True\n\n def has_role(self, event_id):\n \"\"\"\n Checks if user has any of the Roles at an Event.\n Exclude Attendee Role.\n \"\"\"\n attendee_role = Role.query.filter_by(name=ATTENDEE).first()\n uer = UER.query.filter(UER.user == self, UER.event_id == event_id,\n UER.role != attendee_role).first()\n if uer is None:\n return False\n else:\n return True\n\n def _is_role(self, role_name, event_id=None):\n \"\"\"\n Checks if a user has a particular Role at an Event.\n \"\"\"\n role = Role.query.filter_by(name=role_name).first()\n if event_id:\n uer = UER.query.filter_by(user=self,\n event_id=event_id,\n role=role).first()\n else:\n uer = UER.query.filter_by(user=self,\n role=role).first()\n if not uer:\n return False\n else:\n return True\n\n def is_owner(self, event_id):\n return self._is_role(OWNER, event_id)\n\n def is_organizer(self, event_id):\n # type: (object) -> object\n return self._is_role(ORGANIZER, event_id)\n\n def is_coorganizer(self, event_id):\n return self._is_role(COORGANIZER, event_id)\n\n def is_track_organizer(self, event_id):\n return self._is_role(TRACK_ORGANIZER, event_id)\n\n def is_moderator(self, event_id):\n return 
self._is_role(MODERATOR, event_id)\n\n def is_registrar(self, event_id):\n return self._is_role(REGISTRAR, event_id)\n\n def is_attendee(self, event_id):\n return self._is_role(ATTENDEE, event_id)\n\n def has_event_access(self, event_id):\n return self._is_role(OWNER, event_id) or self._is_role(ORGANIZER, event_id) or \\\n self._is_role(COORGANIZER, event_id)\n\n @hybrid_property\n def is_user_owner(self):\n return self._is_role(OWNER)\n\n @hybrid_property\n def is_user_organizer(self):\n # type: (object) -> object\n return self._is_role(ORGANIZER)\n\n @hybrid_property\n def is_user_coorganizer(self):\n return self._is_role(COORGANIZER)\n\n @hybrid_property\n def is_user_track_organizer(self):\n return self._is_role(TRACK_ORGANIZER)\n\n @hybrid_property\n def is_user_moderator(self):\n return self._is_role(MODERATOR)\n\n @hybrid_property\n def is_user_registrar(self):\n return self._is_role(REGISTRAR)\n\n @hybrid_property\n def is_user_attendee(self):\n return self._is_role(ATTENDEE)\n\n def _has_perm(self, operation, service_class, event_id):\n # Operation names and their corresponding permission in `Permissions`\n operations = {\n 'create': 'can_create',\n 'read': 'can_read',\n 'update': 'can_update',\n 'delete': 'can_delete',\n }\n if operation not in list(operations.keys()):\n raise ValueError('No such operation defined')\n\n try:\n service_name = service_class.get_service_name()\n except AttributeError:\n # If `service_class` does not have `get_service_name()`\n return False\n\n if self.is_super_admin:\n return True\n\n service = Service.query.filter_by(name=service_name).first()\n\n uer_querylist = UER.query.filter_by(user=self,\n event_id=event_id)\n for uer in uer_querylist:\n role = uer.role\n perm = Permission.query.filter_by(role=role,\n service=service).first()\n if getattr(perm, operations[operation]):\n return True\n\n return False\n\n def can_create(self, service_class, event_id):\n return self._has_perm('create', service_class, event_id)\n\n def can_read(self, service_class, event_id):\n return self._has_perm('read', service_class, event_id)\n\n def can_update(self, service_class, event_id):\n return self._has_perm('update', service_class, event_id)\n\n def can_delete(self, service_class, event_id):\n return self._has_perm('delete', service_class, event_id)\n\n def is_speaker_at_session(self, session_id):\n try:\n session = Session.query.filter(Session.speakers.any(Speaker.user_id == self.id)).filter(\n Session.id == session_id).one()\n if session:\n return True\n else:\n return False\n except MultipleResultsFound:\n return False\n except NoResultFound:\n return False\n\n def is_speaker_at_event(self, event_id):\n try:\n session = Session.query.filter(Session.speakers.any(Speaker.user_id == self.id)).filter(\n Session.event_id == event_id).first()\n if session:\n return True\n else:\n return False\n except MultipleResultsFound:\n return False\n except NoResultFound:\n return False\n\n # Flask-Login integration\n def is_authenticated(self):\n return True\n\n def is_active(self):\n return True\n\n def is_anonymous(self):\n return False\n\n def get_id(self):\n return self.id\n\n def is_correct_password(self, password):\n salt = self.salt\n password = str(generate_password_hash(password, salt), 'utf-8')\n if password == self._password:\n return True\n return False\n\n @property\n def is_staff(self):\n return self.is_super_admin or self.is_admin\n\n def is_sys_role(self, role_id):\n \"\"\"\n Check if a user has a Custom System Role assigned.\n `role_id` is id of a 
`CustomSysRole` instance.\n \"\"\"\n role = UserSystemRole.query.filter_by(user=self, role_id=role_id).first()\n return bool(role)\n\n def first_access_panel(self):\n \"\"\"\n Check if the user is assigned a Custom Role or not\n This checks if there is an entry containing the current user in the `user_system_roles` table\n returns panel name if exists otherwise false\n \"\"\"\n custom_role = UserSystemRole.query.filter_by(user=self).first()\n if not custom_role:\n return False\n perm = PanelPermission.query.filter(PanelPermission.custom_system_roles.any(id=custom_role.role_id)).first()\n if not perm:\n return False\n return perm.panel_name\n\n def can_download_tickets(self, order):\n permissible_users = [holder.id for holder in order.ticket_holders] + [order.user.id]\n if self.is_staff or self.has_event_access(order.event.id) or self.id in permissible_users:\n return True\n return False\n\n def can_access_panel(self, panel_name):\n \"\"\"\n Check if user can access an Admin Panel\n \"\"\"\n if self.is_staff:\n return True\n\n custom_sys_roles = UserSystemRole.query.filter_by(user=self)\n for custom_role in custom_sys_roles:\n if custom_role.role.can_access(panel_name):\n return True\n\n return False\n\n def get_unread_notif_count(self):\n return get_count(Notification.query.filter_by(user=self, is_read=False))\n\n def get_unread_notifs(self):\n \"\"\"\n Get unread notifications with titles, humanized receiving time\n and Mark-as-read links.\n \"\"\"\n notifs = []\n unread_notifs = Notification.query.filter_by(user=self, is_read=False).order_by(\n desc(Notification.received_at))\n for notif in unread_notifs:\n notifs.append({\n 'title': notif.title,\n 'received_at': humanize.naturaltime(datetime.now(pytz.utc) - notif.received_at),\n 'mark_read': url_for('notifications.mark_as_read', notification_id=notif.id)\n })\n\n return notifs\n\n # update last access time\n def update_lat(self):\n self.last_accessed_at = datetime.now(pytz.utc)\n\n @property\n def fullname(self):\n firstname = self.first_name if self.first_name else ''\n lastname = self.last_name if self.last_name else ''\n if firstname and lastname:\n return '{} {}'.format(firstname, lastname)\n else:\n return ''\n\n def __repr__(self):\n return '<User %r>' % self.email\n\n def __str__(self):\n return self.__repr__()\n\n def __setattr__(self, name, value):\n if name == 'details':\n super(User, self).__setattr__(name, clean_html(clean_up_string(value)))\n else:\n super(User, self).__setattr__(name, value)\n\n\[email protected]_for(User, 'init')\ndef receive_init(target, args, kwargs):\n target.signup_at = datetime.now(pytz.utc)\n",
"path": "app/models/user.py"
}
] | diff --git a/app/models/user.py b/app/models/user.py
index 697f6a97f4..80c3f2f4c9 100644
--- a/app/models/user.py
+++ b/app/models/user.py
@@ -387,7 +387,7 @@ def first_access_panel(self):
def can_download_tickets(self, order):
permissible_users = [holder.id for holder in order.ticket_holders] + [order.user.id]
- if self.is_staff or self.is_organizer(order.event.id) or self.id in permissible_users:
+ if self.is_staff or self.has_event_access(order.event.id) or self.id in permissible_users:
return True
return False
| Coorganizer and owners can't download tickets, invoices
 The current configuration only allows the organizer role to download tickets and invoices; event owners and co-organizers are denied.
|
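For the open-event-server record above, a minimal, self-contained sketch (hypothetical names, not the real SQLAlchemy model) of why the diff swaps `is_organizer()` for `has_event_access()`: the latter also accepts owners and co-organizers, which is what the issue asks for.

    # Hypothetical stand-ins for the model methods, showing only the role logic.
    EVENT_ACCESS_ROLES = {'owner', 'organizer', 'coorganizer'}

    def has_event_access(user_roles):
        """user_roles: the set of role names the user holds for this event."""
        return bool(user_roles & EVENT_ACCESS_ROLES)

    def can_download_tickets(user_roles, is_staff, user_id, permissible_user_ids):
        # Before the fix, only 'organizer' in user_roles passed this check.
        return is_staff or has_event_access(user_roles) or user_id in permissible_user_ids

    # An event owner who is neither staff nor a ticket holder can now download.
    assert can_download_tickets({'owner'}, False, 42, {7, 9})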
zulip__zulip-20678 | [
{
"content": "#!/usr/bin/env python3\nimport argparse\nimport configparser\nimport datetime\nimport functools\nimport hashlib\nimport json\nimport logging\nimport os\nimport pwd\nimport random\nimport re\nimport shlex\nimport shutil\nimport subprocess\nimport sys\nimport time\nimport uuid\nfrom typing import Any, Dict, List, Sequence, Set\nfrom urllib.parse import SplitResult\n\nDEPLOYMENTS_DIR = \"/home/zulip/deployments\"\nLOCK_DIR = os.path.join(DEPLOYMENTS_DIR, \"lock\")\nTIMESTAMP_FORMAT = \"%Y-%m-%d-%H-%M-%S\"\n\n# Color codes\nOKBLUE = \"\\033[94m\"\nOKGREEN = \"\\033[92m\"\nWARNING = \"\\033[93m\"\nFAIL = \"\\033[91m\"\nENDC = \"\\033[0m\"\nBLACKONYELLOW = \"\\x1b[0;30;43m\"\nWHITEONRED = \"\\x1b[0;37;41m\"\nBOLDRED = \"\\x1B[1;31m\"\n\nGREEN = \"\\x1b[32m\"\nYELLOW = \"\\x1b[33m\"\nBLUE = \"\\x1b[34m\"\nMAGENTA = \"\\x1b[35m\"\nCYAN = \"\\x1b[36m\"\n\n\ndef overwrite_symlink(src: str, dst: str) -> None:\n dir, base = os.path.split(dst)\n while True:\n # Note: creating a temporary filename like this is not generally\n # secure. It’s fine in this case because os.symlink refuses to\n # overwrite an existing target; we handle the error and try again.\n tmp = os.path.join(dir, f\".{base}.{random.randrange(1 << 40):010x}\")\n try:\n os.symlink(src, tmp)\n except FileExistsError:\n continue\n break\n try:\n os.rename(tmp, dst)\n except BaseException:\n os.remove(tmp)\n raise\n\n\ndef parse_cache_script_args(description: str) -> argparse.Namespace:\n # Keep this in sync with clean_unused_caches in provision_inner.py\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\n \"--threshold\",\n dest=\"threshold_days\",\n type=int,\n default=14,\n metavar=\"<days>\",\n help=\"Any cache which is not in \"\n \"use by a deployment not older than threshold days(current \"\n \"installation in dev) and older than threshold days will be \"\n \"deleted. (defaults to 14)\",\n )\n parser.add_argument(\n \"--dry-run\",\n action=\"store_true\",\n help=\"If specified then script will only print the caches \"\n \"that it will delete/keep back. 
It will not delete any cache.\",\n )\n parser.add_argument(\n \"--verbose\",\n action=\"store_true\",\n help=\"If specified then script will print a detailed report \"\n \"of what is being will deleted/kept back.\",\n )\n parser.add_argument(\n \"--no-print-headings\",\n dest=\"no_headings\",\n action=\"store_true\",\n help=\"If specified then script will not print headings for \"\n \"what will be deleted/kept back.\",\n )\n\n args = parser.parse_args()\n args.verbose |= args.dry_run # Always print a detailed report in case of dry run.\n return args\n\n\ndef get_deploy_root() -> str:\n return os.path.realpath(\n os.path.normpath(os.path.join(os.path.dirname(__file__), \"..\", \"..\")),\n )\n\n\ndef get_deployment_version(extract_path: str) -> str:\n version = \"0.0.0\"\n for item in os.listdir(extract_path):\n item_path = os.path.join(extract_path, item)\n if item.startswith(\"zulip-server\") and os.path.isdir(item_path):\n with open(os.path.join(item_path, \"version.py\")) as f:\n result = re.search('ZULIP_VERSION = \"(.*)\"', f.read())\n if result:\n version = result.groups()[0]\n break\n return version\n\n\ndef is_invalid_upgrade(current_version: str, new_version: str) -> bool:\n if new_version > \"1.4.3\" and current_version <= \"1.3.10\":\n return True\n return False\n\n\ndef get_zulip_pwent() -> pwd.struct_passwd:\n deploy_root_uid = os.stat(get_deploy_root()).st_uid\n if deploy_root_uid != 0:\n return pwd.getpwuid(deploy_root_uid)\n\n # In the case that permissions got messed up and the deployment\n # directory is unexpectedly owned by root, we fallback to the\n # `zulip` user as that's the correct value in production.\n return pwd.getpwnam(\"zulip\")\n\n\ndef get_postgres_pwent() -> pwd.struct_passwd:\n try:\n return pwd.getpwnam(\"postgres\")\n except KeyError:\n return get_zulip_pwent()\n\n\ndef su_to_zulip(save_suid: bool = False) -> None:\n \"\"\"Warning: su_to_zulip assumes that the zulip checkout is owned by\n the zulip user (or whatever normal user is running the Zulip\n installation). 
It should never be run from the installer or other\n production contexts before /home/zulip/deployments/current is\n created.\"\"\"\n pwent = get_zulip_pwent()\n os.setgid(pwent.pw_gid)\n if save_suid:\n os.setresuid(pwent.pw_uid, pwent.pw_uid, os.getuid())\n else:\n os.setuid(pwent.pw_uid)\n os.environ[\"HOME\"] = pwent.pw_dir\n\n\ndef make_deploy_path() -> str:\n timestamp = datetime.datetime.now().strftime(TIMESTAMP_FORMAT)\n return os.path.join(DEPLOYMENTS_DIR, timestamp)\n\n\nTEMPLATE_DATABASE_DIR = \"test-backend/databases\"\n\n\ndef get_dev_uuid_var_path(create_if_missing: bool = False) -> str:\n zulip_path = get_deploy_root()\n uuid_path = os.path.join(os.path.realpath(os.path.dirname(zulip_path)), \".zulip-dev-uuid\")\n if os.path.exists(uuid_path):\n with open(uuid_path) as f:\n zulip_uuid = f.read().strip()\n else:\n if create_if_missing:\n zulip_uuid = str(uuid.uuid4())\n # We need root access here, since the path will be under /srv/ in the\n # development environment.\n run_as_root([\"sh\", \"-c\", 'echo \"$1\" > \"$2\"', \"-\", zulip_uuid, uuid_path])\n else:\n raise AssertionError(\"Missing UUID file; please run tools/provision!\")\n\n result_path = os.path.join(zulip_path, \"var\", zulip_uuid)\n os.makedirs(result_path, exist_ok=True)\n return result_path\n\n\ndef get_deployment_lock(error_rerun_script: str) -> None:\n start_time = time.time()\n got_lock = False\n while time.time() - start_time < 300:\n try:\n os.mkdir(LOCK_DIR)\n got_lock = True\n break\n except OSError:\n print(\n WARNING\n + \"Another deployment in progress; waiting for lock... \"\n + f\"(If no deployment is running, rmdir {LOCK_DIR})\"\n + ENDC,\n flush=True,\n )\n time.sleep(3)\n\n if not got_lock:\n print(\n FAIL\n + \"Deployment already in progress. Please run\\n\"\n + f\" {error_rerun_script}\\n\"\n + \"manually when the previous deployment finishes, or run\\n\"\n + f\" rmdir {LOCK_DIR}\\n\"\n + \"if the previous deployment crashed.\"\n + ENDC\n )\n sys.exit(1)\n\n\ndef release_deployment_lock() -> None:\n shutil.rmtree(LOCK_DIR)\n\n\ndef run(args: Sequence[str], **kwargs: Any) -> None:\n # Output what we're doing in the `set -x` style\n print(\"+ {}\".format(\" \".join(map(shlex.quote, args))), flush=True)\n\n try:\n subprocess.check_call(args, **kwargs)\n except subprocess.CalledProcessError:\n print()\n print(\n WHITEONRED\n + \"Error running a subcommand of {}: {}\".format(\n sys.argv[0],\n \" \".join(map(shlex.quote, args)),\n )\n + ENDC\n )\n print(WHITEONRED + \"Actual error output for the subcommand is just above this.\" + ENDC)\n print()\n sys.exit(1)\n\n\ndef log_management_command(cmd: Sequence[str], log_path: str) -> None:\n log_dir = os.path.dirname(log_path)\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n formatter = logging.Formatter(\"%(asctime)s: %(message)s\")\n file_handler = logging.FileHandler(log_path)\n file_handler.setFormatter(formatter)\n logger = logging.getLogger(\"zulip.management\")\n logger.addHandler(file_handler)\n logger.setLevel(logging.INFO)\n\n logger.info(\"Ran %s\", \" \".join(map(shlex.quote, cmd)))\n\n\ndef get_environment() -> str:\n if os.path.exists(DEPLOYMENTS_DIR):\n return \"prod\"\n return \"dev\"\n\n\ndef get_recent_deployments(threshold_days: int) -> Set[str]:\n # Returns a list of deployments not older than threshold days\n # including `/root/zulip` directory if it exists.\n recent = set()\n threshold_date = datetime.datetime.now() - datetime.timedelta(days=threshold_days)\n for dir_name in os.listdir(DEPLOYMENTS_DIR):\n target_dir = 
os.path.join(DEPLOYMENTS_DIR, dir_name)\n if not os.path.isdir(target_dir):\n # Skip things like uwsgi sockets, symlinks, etc.\n continue\n if not os.path.exists(os.path.join(target_dir, \"zerver\")):\n # Skip things like \"lock\" that aren't actually a deployment directory\n continue\n try:\n date = datetime.datetime.strptime(dir_name, TIMESTAMP_FORMAT)\n if date >= threshold_date:\n recent.add(target_dir)\n except ValueError:\n # Always include deployments whose name is not in the format of a timestamp.\n recent.add(target_dir)\n # If it is a symlink then include the target as well.\n if os.path.islink(target_dir):\n recent.add(os.path.realpath(target_dir))\n if os.path.exists(\"/root/zulip\"):\n recent.add(\"/root/zulip\")\n return recent\n\n\ndef get_threshold_timestamp(threshold_days: int) -> int:\n # Given number of days, this function returns timestamp corresponding\n # to the time prior to given number of days.\n threshold = datetime.datetime.now() - datetime.timedelta(days=threshold_days)\n threshold_timestamp = int(time.mktime(threshold.utctimetuple()))\n return threshold_timestamp\n\n\ndef get_caches_to_be_purged(\n caches_dir: str, caches_in_use: Set[str], threshold_days: int\n) -> Set[str]:\n # Given a directory containing caches, a list of caches in use\n # and threshold days, this function return a list of caches\n # which can be purged. Remove the cache only if it is:\n # 1: Not in use by the current installation(in dev as well as in prod).\n # 2: Not in use by a deployment not older than `threshold_days`(in prod).\n # 3: Not in use by '/root/zulip'.\n # 4: Not older than `threshold_days`.\n caches_to_purge = set()\n threshold_timestamp = get_threshold_timestamp(threshold_days)\n for cache_dir_base in os.listdir(caches_dir):\n cache_dir = os.path.join(caches_dir, cache_dir_base)\n if cache_dir in caches_in_use:\n # Never purge a cache which is in use.\n continue\n if os.path.getctime(cache_dir) < threshold_timestamp:\n caches_to_purge.add(cache_dir)\n return caches_to_purge\n\n\ndef purge_unused_caches(\n caches_dir: str,\n caches_in_use: Set[str],\n cache_type: str,\n args: argparse.Namespace,\n) -> None:\n all_caches = {os.path.join(caches_dir, cache) for cache in os.listdir(caches_dir)}\n caches_to_purge = get_caches_to_be_purged(caches_dir, caches_in_use, args.threshold_days)\n caches_to_keep = all_caches - caches_to_purge\n\n may_be_perform_purging(\n caches_to_purge, caches_to_keep, cache_type, args.dry_run, args.verbose, args.no_headings\n )\n if args.verbose:\n print(\"Done!\")\n\n\ndef generate_sha1sum_emoji(zulip_path: str) -> str:\n sha = hashlib.sha1()\n\n filenames = [\n \"static/assets/zulip-emoji/zulip.png\",\n \"tools/setup/emoji/emoji_map.json\",\n \"tools/setup/emoji/build_emoji\",\n \"tools/setup/emoji/emoji_setup_utils.py\",\n \"tools/setup/emoji/emoji_names.py\",\n ]\n\n for filename in filenames:\n file_path = os.path.join(zulip_path, filename)\n with open(file_path, \"rb\") as reader:\n sha.update(reader.read())\n\n # Take into account the version of `emoji-datasource-google` package\n # while generating success stamp.\n PACKAGE_FILE_PATH = os.path.join(zulip_path, \"package.json\")\n with open(PACKAGE_FILE_PATH) as fp:\n parsed_package_file = json.load(fp)\n dependency_data = parsed_package_file[\"dependencies\"]\n\n if \"emoji-datasource-google\" in dependency_data:\n with open(os.path.join(zulip_path, \"yarn.lock\")) as fp:\n (emoji_datasource_version,) = re.findall(\n r\"^emoji-datasource-google@\"\n + 
re.escape(dependency_data[\"emoji-datasource-google\"])\n + r':\\n version \"(.*)\"',\n fp.read(),\n re.M,\n )\n else:\n emoji_datasource_version = \"0\"\n sha.update(emoji_datasource_version.encode())\n\n return sha.hexdigest()\n\n\ndef may_be_perform_purging(\n dirs_to_purge: Set[str],\n dirs_to_keep: Set[str],\n dir_type: str,\n dry_run: bool,\n verbose: bool,\n no_headings: bool,\n) -> None:\n if dry_run:\n print(\"Performing a dry run...\")\n if not no_headings:\n print(f\"Cleaning unused {dir_type}s...\")\n\n for directory in dirs_to_purge:\n if verbose:\n print(f\"Cleaning unused {dir_type}: {directory}\")\n if not dry_run:\n run_as_root([\"rm\", \"-rf\", directory])\n\n for directory in dirs_to_keep:\n if verbose:\n print(f\"Keeping used {dir_type}: {directory}\")\n\n\[email protected]_cache(None)\ndef parse_os_release() -> Dict[str, str]:\n \"\"\"\n Example of the useful subset of the data:\n {\n 'ID': 'ubuntu',\n 'VERSION_ID': '18.04',\n 'NAME': 'Ubuntu',\n 'VERSION': '18.04.3 LTS (Bionic Beaver)',\n 'PRETTY_NAME': 'Ubuntu 18.04.3 LTS',\n }\n\n VERSION_CODENAME (e.g. 'bionic') is nice and readable to Ubuntu\n developers, but we avoid using it, as it is not available on\n RHEL-based platforms.\n \"\"\"\n distro_info = {} # type: Dict[str, str]\n with open(\"/etc/os-release\") as fp:\n for line in fp:\n line = line.strip()\n if not line or line.startswith(\"#\"):\n # The line may be blank or a comment, see:\n # https://www.freedesktop.org/software/systemd/man/os-release.html\n continue\n k, v = line.split(\"=\", 1)\n [distro_info[k]] = shlex.split(v)\n return distro_info\n\n\[email protected]_cache(None)\ndef os_families() -> Set[str]:\n \"\"\"\n Known families:\n debian (includes: debian, ubuntu)\n ubuntu (includes: ubuntu)\n fedora (includes: fedora, rhel, centos)\n rhel (includes: rhel, centos)\n centos (includes: centos)\n \"\"\"\n distro_info = parse_os_release()\n return {distro_info[\"ID\"], *distro_info.get(\"ID_LIKE\", \"\").split()}\n\n\ndef files_and_string_digest(filenames: Sequence[str], extra_strings: Sequence[str]) -> str:\n # see is_digest_obsolete for more context\n sha1sum = hashlib.sha1()\n for fn in filenames:\n with open(fn, \"rb\") as file_to_hash:\n sha1sum.update(file_to_hash.read())\n\n for extra_string in extra_strings:\n sha1sum.update(extra_string.encode())\n\n return sha1sum.hexdigest()\n\n\ndef is_digest_obsolete(\n hash_name: str, filenames: Sequence[str], extra_strings: Sequence[str] = []\n) -> bool:\n \"\"\"\n In order to determine if we need to run some\n process, we calculate a digest of the important\n files and strings whose respective contents\n or values may indicate such a need.\n\n filenames = files we should hash the contents of\n extra_strings = strings we should hash directly\n\n Grep for callers to see examples of how this is used.\n\n To elaborate on extra_strings, they will typically\n be things like:\n\n - package versions (that we import)\n - settings values (that we stringify with\n json, deterministically)\n \"\"\"\n last_hash_path = os.path.join(get_dev_uuid_var_path(), hash_name)\n try:\n with open(last_hash_path) as f:\n old_hash = f.read()\n except FileNotFoundError:\n # This is normal for a fresh checkout--a missing\n # digest is an obsolete digest.\n return True\n\n new_hash = files_and_string_digest(filenames, extra_strings)\n\n return new_hash != old_hash\n\n\ndef write_new_digest(\n hash_name: str, filenames: Sequence[str], extra_strings: Sequence[str] = []\n) -> None:\n hash_path = os.path.join(get_dev_uuid_var_path(), 
hash_name)\n new_hash = files_and_string_digest(filenames, extra_strings)\n with open(hash_path, \"w\") as f:\n f.write(new_hash)\n\n # Be a little verbose here--our callers ensure we\n # only write new digests when things have changed, and\n # making this system more transparent to developers\n # can help them troubleshoot provisioning glitches.\n print(\"New digest written to: \" + hash_path)\n\n\ndef is_root() -> bool:\n if \"posix\" in os.name and os.geteuid() == 0:\n return True\n return False\n\n\ndef run_as_root(args: List[str], **kwargs: Any) -> None:\n sudo_args = kwargs.pop(\"sudo_args\", [])\n if not is_root():\n args = [\"sudo\", *sudo_args, \"--\", *args]\n run(args, **kwargs)\n\n\ndef assert_not_running_as_root() -> None:\n script_name = os.path.abspath(sys.argv[0])\n if is_root():\n pwent = get_zulip_pwent()\n msg = (\n \"{shortname} should not be run as root. Use `su {user}` to switch to the 'zulip'\\n\"\n \"user before rerunning this, or use \\n su {user} -c '{name} ...'\\n\"\n \"to switch users and run this as a single command.\"\n ).format(name=script_name, shortname=os.path.basename(script_name), user=pwent.pw_name)\n print(msg)\n sys.exit(1)\n\n\ndef assert_running_as_root(strip_lib_from_paths: bool = False) -> None:\n script_name = os.path.abspath(sys.argv[0])\n # Since these Python scripts are run inside a thin shell wrapper,\n # we need to replace the paths in order to ensure we instruct\n # users to (re)run the right command.\n if strip_lib_from_paths:\n script_name = script_name.replace(\"scripts/lib/upgrade\", \"scripts/upgrade\")\n if not is_root():\n print(f\"{script_name} must be run as root.\")\n sys.exit(1)\n\n\ndef get_config(\n config_file: configparser.RawConfigParser,\n section: str,\n key: str,\n default_value: str = \"\",\n) -> str:\n if config_file.has_option(section, key):\n return config_file.get(section, key)\n return default_value\n\n\ndef get_config_file() -> configparser.RawConfigParser:\n config_file = configparser.RawConfigParser()\n config_file.read(\"/etc/zulip/zulip.conf\")\n return config_file\n\n\ndef get_deploy_options(config_file: configparser.RawConfigParser) -> List[str]:\n return get_config(config_file, \"deployment\", \"deploy_options\", \"\").strip().split()\n\n\ndef run_psql_as_postgres(\n config_file: configparser.RawConfigParser,\n sql_query: str,\n) -> None:\n dbname = get_config(config_file, \"postgresql\", \"database_name\", \"zulip\")\n subcmd = \" \".join(\n map(\n shlex.quote,\n [\n \"psql\",\n \"-v\",\n \"ON_ERROR_STOP=1\",\n \"-d\",\n dbname,\n \"-c\",\n sql_query,\n ],\n )\n )\n subprocess.check_call([\"su\", \"postgres\", \"-c\", subcmd])\n\n\ndef get_tornado_ports(config_file: configparser.RawConfigParser) -> List[int]:\n ports = []\n if config_file.has_section(\"tornado_sharding\"):\n ports = [int(port) for port in config_file.options(\"tornado_sharding\")]\n if not ports:\n ports = [9800]\n return ports\n\n\ndef get_or_create_dev_uuid_var_path(path: str) -> str:\n absolute_path = f\"{get_dev_uuid_var_path()}/{path}\"\n os.makedirs(absolute_path, exist_ok=True)\n return absolute_path\n\n\ndef is_vagrant_env_host(path: str) -> bool:\n return \".vagrant\" in os.listdir(path)\n\n\ndef has_application_server(once: bool = False) -> bool:\n if once:\n return os.path.exists(\"/etc/supervisor/conf.d/zulip/zulip-once.conf\")\n return (\n # Current path\n os.path.exists(\"/etc/supervisor/conf.d/zulip/zulip.conf\")\n # Old path, relevant for upgrades\n or os.path.exists(\"/etc/supervisor/conf.d/zulip.conf\")\n )\n\n\ndef 
list_supervisor_processes(*args: str) -> List[str]:\n worker_status = subprocess.run(\n [\"supervisorctl\", \"status\", *args],\n universal_newlines=True,\n stdout=subprocess.PIPE,\n )\n # `supercisorctl status` returns 3 if any are stopped, which is\n # fine here; and exit code 4 is for no such process, which is\n # handled below.\n if worker_status.returncode not in (0, 3, 4):\n worker_status.check_returncode()\n\n processes = []\n for status_line in worker_status.stdout.splitlines():\n if not re.search(r\"ERROR \\(no such (process|group)\\)\", status_line):\n processes.append(status_line.split()[0])\n return processes\n\n\ndef has_process_fts_updates() -> bool:\n return (\n # Current path\n os.path.exists(\"/etc/supervisor/conf.d/zulip/zulip_db.conf\")\n # Old path, relevant for upgrades\n or os.path.exists(\"/etc/supervisor/conf.d/zulip_db.conf\")\n )\n\n\ndef deport(netloc: str) -> str:\n \"\"\"Remove the port from a hostname:port string. Brackets on a literal\n IPv6 address are included.\"\"\"\n r = SplitResult(\"\", netloc, \"\", \"\", \"\")\n assert r.hostname is not None\n return \"[\" + r.hostname + \"]\" if \":\" in r.hostname else r.hostname\n\n\ndef start_arg_parser(action: str, add_help: bool = False) -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(add_help=add_help)\n parser.add_argument(\"--fill-cache\", action=\"store_true\", help=\"Fill the memcached caches\")\n if action == \"restart\":\n parser.add_argument(\n \"--less-graceful\",\n action=\"store_true\",\n help=\"Restart with more concern for expediency than minimizing availability interruption\",\n )\n parser.add_argument(\n \"--skip-tornado\",\n action=\"store_true\",\n help=\"Do not restart Tornado processes\",\n )\n return parser\n\n\nif __name__ == \"__main__\":\n cmd = sys.argv[1]\n if cmd == \"make_deploy_path\":\n print(make_deploy_path())\n elif cmd == \"get_dev_uuid\":\n print(get_dev_uuid_var_path())\n",
"path": "scripts/lib/zulip_tools.py"
}
] | [
{
"content": "#!/usr/bin/env python3\nimport argparse\nimport configparser\nimport datetime\nimport functools\nimport hashlib\nimport json\nimport logging\nimport os\nimport pwd\nimport random\nimport re\nimport shlex\nimport shutil\nimport subprocess\nimport sys\nimport time\nimport uuid\nfrom typing import Any, Dict, List, Sequence, Set\nfrom urllib.parse import SplitResult\n\nDEPLOYMENTS_DIR = \"/home/zulip/deployments\"\nLOCK_DIR = os.path.join(DEPLOYMENTS_DIR, \"lock\")\nTIMESTAMP_FORMAT = \"%Y-%m-%d-%H-%M-%S\"\n\n# Color codes\nOKBLUE = \"\\033[94m\"\nOKGREEN = \"\\033[92m\"\nWARNING = \"\\033[93m\"\nFAIL = \"\\033[91m\"\nENDC = \"\\033[0m\"\nBLACKONYELLOW = \"\\x1b[0;30;43m\"\nWHITEONRED = \"\\x1b[0;37;41m\"\nBOLDRED = \"\\x1B[1;31m\"\n\nGREEN = \"\\x1b[32m\"\nYELLOW = \"\\x1b[33m\"\nBLUE = \"\\x1b[34m\"\nMAGENTA = \"\\x1b[35m\"\nCYAN = \"\\x1b[36m\"\n\n\ndef overwrite_symlink(src: str, dst: str) -> None:\n dir, base = os.path.split(dst)\n while True:\n # Note: creating a temporary filename like this is not generally\n # secure. It’s fine in this case because os.symlink refuses to\n # overwrite an existing target; we handle the error and try again.\n tmp = os.path.join(dir, f\".{base}.{random.randrange(1 << 40):010x}\")\n try:\n os.symlink(src, tmp)\n except FileExistsError:\n continue\n break\n try:\n os.rename(tmp, dst)\n except BaseException:\n os.remove(tmp)\n raise\n\n\ndef parse_cache_script_args(description: str) -> argparse.Namespace:\n # Keep this in sync with clean_unused_caches in provision_inner.py\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\n \"--threshold\",\n dest=\"threshold_days\",\n type=int,\n default=14,\n metavar=\"<days>\",\n help=\"Any cache which is not in \"\n \"use by a deployment not older than threshold days(current \"\n \"installation in dev) and older than threshold days will be \"\n \"deleted. (defaults to 14)\",\n )\n parser.add_argument(\n \"--dry-run\",\n action=\"store_true\",\n help=\"If specified then script will only print the caches \"\n \"that it will delete/keep back. 
It will not delete any cache.\",\n )\n parser.add_argument(\n \"--verbose\",\n action=\"store_true\",\n help=\"If specified then script will print a detailed report \"\n \"of what is being will deleted/kept back.\",\n )\n parser.add_argument(\n \"--no-print-headings\",\n dest=\"no_headings\",\n action=\"store_true\",\n help=\"If specified then script will not print headings for \"\n \"what will be deleted/kept back.\",\n )\n\n args = parser.parse_args()\n args.verbose |= args.dry_run # Always print a detailed report in case of dry run.\n return args\n\n\ndef get_deploy_root() -> str:\n return os.path.realpath(\n os.path.normpath(os.path.join(os.path.dirname(__file__), \"..\", \"..\")),\n )\n\n\ndef get_deployment_version(extract_path: str) -> str:\n version = \"0.0.0\"\n for item in os.listdir(extract_path):\n item_path = os.path.join(extract_path, item)\n if item.startswith(\"zulip-server\") and os.path.isdir(item_path):\n with open(os.path.join(item_path, \"version.py\")) as f:\n result = re.search('ZULIP_VERSION = \"(.*)\"', f.read())\n if result:\n version = result.groups()[0]\n break\n return version\n\n\ndef is_invalid_upgrade(current_version: str, new_version: str) -> bool:\n if new_version > \"1.4.3\" and current_version <= \"1.3.10\":\n return True\n return False\n\n\ndef get_zulip_pwent() -> pwd.struct_passwd:\n deploy_root_uid = os.stat(get_deploy_root()).st_uid\n if deploy_root_uid != 0:\n return pwd.getpwuid(deploy_root_uid)\n\n # In the case that permissions got messed up and the deployment\n # directory is unexpectedly owned by root, we fallback to the\n # `zulip` user as that's the correct value in production.\n return pwd.getpwnam(\"zulip\")\n\n\ndef get_postgres_pwent() -> pwd.struct_passwd:\n try:\n return pwd.getpwnam(\"postgres\")\n except KeyError:\n return get_zulip_pwent()\n\n\ndef su_to_zulip(save_suid: bool = False) -> None:\n \"\"\"Warning: su_to_zulip assumes that the zulip checkout is owned by\n the zulip user (or whatever normal user is running the Zulip\n installation). 
It should never be run from the installer or other\n production contexts before /home/zulip/deployments/current is\n created.\"\"\"\n pwent = get_zulip_pwent()\n os.setgid(pwent.pw_gid)\n if save_suid:\n os.setresuid(pwent.pw_uid, pwent.pw_uid, os.getuid())\n else:\n os.setuid(pwent.pw_uid)\n os.environ[\"HOME\"] = pwent.pw_dir\n\n\ndef make_deploy_path() -> str:\n timestamp = datetime.datetime.now().strftime(TIMESTAMP_FORMAT)\n return os.path.join(DEPLOYMENTS_DIR, timestamp)\n\n\nTEMPLATE_DATABASE_DIR = \"test-backend/databases\"\n\n\ndef get_dev_uuid_var_path(create_if_missing: bool = False) -> str:\n zulip_path = get_deploy_root()\n uuid_path = os.path.join(os.path.realpath(os.path.dirname(zulip_path)), \".zulip-dev-uuid\")\n if os.path.exists(uuid_path):\n with open(uuid_path) as f:\n zulip_uuid = f.read().strip()\n else:\n if create_if_missing:\n zulip_uuid = str(uuid.uuid4())\n # We need root access here, since the path will be under /srv/ in the\n # development environment.\n run_as_root([\"sh\", \"-c\", 'echo \"$1\" > \"$2\"', \"-\", zulip_uuid, uuid_path])\n else:\n raise AssertionError(\"Missing UUID file; please run tools/provision!\")\n\n result_path = os.path.join(zulip_path, \"var\", zulip_uuid)\n os.makedirs(result_path, exist_ok=True)\n return result_path\n\n\ndef get_deployment_lock(error_rerun_script: str) -> None:\n start_time = time.time()\n got_lock = False\n while time.time() - start_time < 300:\n try:\n os.mkdir(LOCK_DIR)\n got_lock = True\n break\n except OSError:\n print(\n WARNING\n + \"Another deployment in progress; waiting for lock... \"\n + f\"(If no deployment is running, rmdir {LOCK_DIR})\"\n + ENDC,\n flush=True,\n )\n time.sleep(3)\n\n if not got_lock:\n print(\n FAIL\n + \"Deployment already in progress. Please run\\n\"\n + f\" {error_rerun_script}\\n\"\n + \"manually when the previous deployment finishes, or run\\n\"\n + f\" rmdir {LOCK_DIR}\\n\"\n + \"if the previous deployment crashed.\"\n + ENDC\n )\n sys.exit(1)\n\n\ndef release_deployment_lock() -> None:\n shutil.rmtree(LOCK_DIR)\n\n\ndef run(args: Sequence[str], **kwargs: Any) -> None:\n # Output what we're doing in the `set -x` style\n print(\"+ {}\".format(\" \".join(map(shlex.quote, args))), flush=True)\n\n try:\n subprocess.check_call(args, **kwargs)\n except subprocess.CalledProcessError:\n print()\n print(\n WHITEONRED\n + \"Error running a subcommand of {}: {}\".format(\n sys.argv[0],\n \" \".join(map(shlex.quote, args)),\n )\n + ENDC\n )\n print(WHITEONRED + \"Actual error output for the subcommand is just above this.\" + ENDC)\n print()\n sys.exit(1)\n\n\ndef log_management_command(cmd: Sequence[str], log_path: str) -> None:\n log_dir = os.path.dirname(log_path)\n if not os.path.exists(log_dir):\n os.makedirs(log_dir)\n\n formatter = logging.Formatter(\"%(asctime)s: %(message)s\")\n file_handler = logging.FileHandler(log_path)\n file_handler.setFormatter(formatter)\n logger = logging.getLogger(\"zulip.management\")\n logger.addHandler(file_handler)\n logger.setLevel(logging.INFO)\n\n logger.info(\"Ran %s\", \" \".join(map(shlex.quote, cmd)))\n\n\ndef get_environment() -> str:\n if os.path.exists(DEPLOYMENTS_DIR):\n return \"prod\"\n return \"dev\"\n\n\ndef get_recent_deployments(threshold_days: int) -> Set[str]:\n # Returns a list of deployments not older than threshold days\n # including `/root/zulip` directory if it exists.\n recent = set()\n threshold_date = datetime.datetime.now() - datetime.timedelta(days=threshold_days)\n for dir_name in os.listdir(DEPLOYMENTS_DIR):\n target_dir = 
os.path.join(DEPLOYMENTS_DIR, dir_name)\n if not os.path.isdir(target_dir):\n # Skip things like uwsgi sockets, symlinks, etc.\n continue\n if not os.path.exists(os.path.join(target_dir, \"zerver\")):\n # Skip things like \"lock\" that aren't actually a deployment directory\n continue\n try:\n date = datetime.datetime.strptime(dir_name, TIMESTAMP_FORMAT)\n if date >= threshold_date:\n recent.add(target_dir)\n except ValueError:\n # Always include deployments whose name is not in the format of a timestamp.\n recent.add(target_dir)\n # If it is a symlink then include the target as well.\n if os.path.islink(target_dir):\n recent.add(os.path.realpath(target_dir))\n if os.path.exists(\"/root/zulip\"):\n recent.add(\"/root/zulip\")\n return recent\n\n\ndef get_threshold_timestamp(threshold_days: int) -> int:\n # Given number of days, this function returns timestamp corresponding\n # to the time prior to given number of days.\n threshold = datetime.datetime.now() - datetime.timedelta(days=threshold_days)\n threshold_timestamp = int(time.mktime(threshold.utctimetuple()))\n return threshold_timestamp\n\n\ndef get_caches_to_be_purged(\n caches_dir: str, caches_in_use: Set[str], threshold_days: int\n) -> Set[str]:\n # Given a directory containing caches, a list of caches in use\n # and threshold days, this function return a list of caches\n # which can be purged. Remove the cache only if it is:\n # 1: Not in use by the current installation(in dev as well as in prod).\n # 2: Not in use by a deployment not older than `threshold_days`(in prod).\n # 3: Not in use by '/root/zulip'.\n # 4: Not older than `threshold_days`.\n caches_to_purge = set()\n threshold_timestamp = get_threshold_timestamp(threshold_days)\n for cache_dir_base in os.listdir(caches_dir):\n cache_dir = os.path.join(caches_dir, cache_dir_base)\n if cache_dir in caches_in_use:\n # Never purge a cache which is in use.\n continue\n if os.path.getctime(cache_dir) < threshold_timestamp:\n caches_to_purge.add(cache_dir)\n return caches_to_purge\n\n\ndef purge_unused_caches(\n caches_dir: str,\n caches_in_use: Set[str],\n cache_type: str,\n args: argparse.Namespace,\n) -> None:\n all_caches = {os.path.join(caches_dir, cache) for cache in os.listdir(caches_dir)}\n caches_to_purge = get_caches_to_be_purged(caches_dir, caches_in_use, args.threshold_days)\n caches_to_keep = all_caches - caches_to_purge\n\n may_be_perform_purging(\n caches_to_purge, caches_to_keep, cache_type, args.dry_run, args.verbose, args.no_headings\n )\n if args.verbose:\n print(\"Done!\")\n\n\ndef generate_sha1sum_emoji(zulip_path: str) -> str:\n sha = hashlib.sha1()\n\n filenames = [\n \"static/assets/zulip-emoji/zulip.png\",\n \"tools/setup/emoji/emoji_map.json\",\n \"tools/setup/emoji/build_emoji\",\n \"tools/setup/emoji/emoji_setup_utils.py\",\n \"tools/setup/emoji/emoji_names.py\",\n ]\n\n for filename in filenames:\n file_path = os.path.join(zulip_path, filename)\n with open(file_path, \"rb\") as reader:\n sha.update(reader.read())\n\n # Take into account the version of `emoji-datasource-google` package\n # while generating success stamp.\n PACKAGE_FILE_PATH = os.path.join(zulip_path, \"package.json\")\n with open(PACKAGE_FILE_PATH) as fp:\n parsed_package_file = json.load(fp)\n dependency_data = parsed_package_file[\"dependencies\"]\n\n if \"emoji-datasource-google\" in dependency_data:\n with open(os.path.join(zulip_path, \"yarn.lock\")) as fp:\n (emoji_datasource_version,) = re.findall(\n r\"^emoji-datasource-google@\"\n + 
re.escape(dependency_data[\"emoji-datasource-google\"])\n + r':\\n version \"(.*)\"',\n fp.read(),\n re.M,\n )\n else:\n emoji_datasource_version = \"0\"\n sha.update(emoji_datasource_version.encode())\n\n return sha.hexdigest()\n\n\ndef may_be_perform_purging(\n dirs_to_purge: Set[str],\n dirs_to_keep: Set[str],\n dir_type: str,\n dry_run: bool,\n verbose: bool,\n no_headings: bool,\n) -> None:\n if dry_run:\n print(\"Performing a dry run...\")\n if not no_headings:\n print(f\"Cleaning unused {dir_type}s...\")\n\n for directory in dirs_to_purge:\n if verbose:\n print(f\"Cleaning unused {dir_type}: {directory}\")\n if not dry_run:\n run_as_root([\"rm\", \"-rf\", directory])\n\n for directory in dirs_to_keep:\n if verbose:\n print(f\"Keeping used {dir_type}: {directory}\")\n\n\[email protected]_cache(None)\ndef parse_os_release() -> Dict[str, str]:\n \"\"\"\n Example of the useful subset of the data:\n {\n 'ID': 'ubuntu',\n 'VERSION_ID': '18.04',\n 'NAME': 'Ubuntu',\n 'VERSION': '18.04.3 LTS (Bionic Beaver)',\n 'PRETTY_NAME': 'Ubuntu 18.04.3 LTS',\n }\n\n VERSION_CODENAME (e.g. 'bionic') is nice and readable to Ubuntu\n developers, but we avoid using it, as it is not available on\n RHEL-based platforms.\n \"\"\"\n distro_info = {} # type: Dict[str, str]\n with open(\"/etc/os-release\") as fp:\n for line in fp:\n line = line.strip()\n if not line or line.startswith(\"#\"):\n # The line may be blank or a comment, see:\n # https://www.freedesktop.org/software/systemd/man/os-release.html\n continue\n k, v = line.split(\"=\", 1)\n [distro_info[k]] = shlex.split(v)\n return distro_info\n\n\[email protected]_cache(None)\ndef os_families() -> Set[str]:\n \"\"\"\n Known families:\n debian (includes: debian, ubuntu)\n ubuntu (includes: ubuntu)\n fedora (includes: fedora, rhel, centos)\n rhel (includes: rhel, centos)\n centos (includes: centos)\n \"\"\"\n distro_info = parse_os_release()\n return {distro_info[\"ID\"], *distro_info.get(\"ID_LIKE\", \"\").split()}\n\n\ndef files_and_string_digest(filenames: Sequence[str], extra_strings: Sequence[str]) -> str:\n # see is_digest_obsolete for more context\n sha1sum = hashlib.sha1()\n for fn in filenames:\n with open(fn, \"rb\") as file_to_hash:\n sha1sum.update(file_to_hash.read())\n\n for extra_string in extra_strings:\n sha1sum.update(extra_string.encode())\n\n return sha1sum.hexdigest()\n\n\ndef is_digest_obsolete(\n hash_name: str, filenames: Sequence[str], extra_strings: Sequence[str] = []\n) -> bool:\n \"\"\"\n In order to determine if we need to run some\n process, we calculate a digest of the important\n files and strings whose respective contents\n or values may indicate such a need.\n\n filenames = files we should hash the contents of\n extra_strings = strings we should hash directly\n\n Grep for callers to see examples of how this is used.\n\n To elaborate on extra_strings, they will typically\n be things like:\n\n - package versions (that we import)\n - settings values (that we stringify with\n json, deterministically)\n \"\"\"\n last_hash_path = os.path.join(get_dev_uuid_var_path(), hash_name)\n try:\n with open(last_hash_path) as f:\n old_hash = f.read()\n except FileNotFoundError:\n # This is normal for a fresh checkout--a missing\n # digest is an obsolete digest.\n return True\n\n new_hash = files_and_string_digest(filenames, extra_strings)\n\n return new_hash != old_hash\n\n\ndef write_new_digest(\n hash_name: str, filenames: Sequence[str], extra_strings: Sequence[str] = []\n) -> None:\n hash_path = os.path.join(get_dev_uuid_var_path(), 
hash_name)\n new_hash = files_and_string_digest(filenames, extra_strings)\n with open(hash_path, \"w\") as f:\n f.write(new_hash)\n\n # Be a little verbose here--our callers ensure we\n # only write new digests when things have changed, and\n # making this system more transparent to developers\n # can help them troubleshoot provisioning glitches.\n print(\"New digest written to: \" + hash_path)\n\n\ndef is_root() -> bool:\n if \"posix\" in os.name and os.geteuid() == 0:\n return True\n return False\n\n\ndef run_as_root(args: List[str], **kwargs: Any) -> None:\n sudo_args = kwargs.pop(\"sudo_args\", [])\n if not is_root():\n args = [\"sudo\", *sudo_args, \"--\", *args]\n run(args, **kwargs)\n\n\ndef assert_not_running_as_root() -> None:\n script_name = os.path.abspath(sys.argv[0])\n if is_root():\n pwent = get_zulip_pwent()\n msg = (\n \"{shortname} should not be run as root. Use `su {user}` to switch to the 'zulip'\\n\"\n \"user before rerunning this, or use \\n su {user} -c '{name} ...'\\n\"\n \"to switch users and run this as a single command.\"\n ).format(name=script_name, shortname=os.path.basename(script_name), user=pwent.pw_name)\n print(msg)\n sys.exit(1)\n\n\ndef assert_running_as_root(strip_lib_from_paths: bool = False) -> None:\n script_name = os.path.abspath(sys.argv[0])\n # Since these Python scripts are run inside a thin shell wrapper,\n # we need to replace the paths in order to ensure we instruct\n # users to (re)run the right command.\n if strip_lib_from_paths:\n script_name = script_name.replace(\"scripts/lib/upgrade\", \"scripts/upgrade\")\n if not is_root():\n print(f\"{script_name} must be run as root.\")\n sys.exit(1)\n\n\ndef get_config(\n config_file: configparser.RawConfigParser,\n section: str,\n key: str,\n default_value: str = \"\",\n) -> str:\n if config_file.has_option(section, key):\n return config_file.get(section, key)\n return default_value\n\n\ndef get_config_file() -> configparser.RawConfigParser:\n config_file = configparser.RawConfigParser()\n config_file.read(\"/etc/zulip/zulip.conf\")\n return config_file\n\n\ndef get_deploy_options(config_file: configparser.RawConfigParser) -> List[str]:\n return get_config(config_file, \"deployment\", \"deploy_options\", \"\").strip().split()\n\n\ndef run_psql_as_postgres(\n config_file: configparser.RawConfigParser,\n sql_query: str,\n) -> None:\n dbname = get_config(config_file, \"postgresql\", \"database_name\", \"zulip\")\n subcmd = \" \".join(\n map(\n shlex.quote,\n [\n \"psql\",\n \"-v\",\n \"ON_ERROR_STOP=1\",\n \"-d\",\n dbname,\n \"-c\",\n sql_query,\n ],\n )\n )\n subprocess.check_call([\"su\", \"postgres\", \"-c\", subcmd])\n\n\ndef get_tornado_ports(config_file: configparser.RawConfigParser) -> List[int]:\n ports = []\n if config_file.has_section(\"tornado_sharding\"):\n ports = [int(port) for port in config_file.options(\"tornado_sharding\")]\n if not ports:\n ports = [9800]\n return ports\n\n\ndef get_or_create_dev_uuid_var_path(path: str) -> str:\n absolute_path = f\"{get_dev_uuid_var_path()}/{path}\"\n os.makedirs(absolute_path, exist_ok=True)\n return absolute_path\n\n\ndef is_vagrant_env_host(path: str) -> bool:\n return \".vagrant\" in os.listdir(path)\n\n\ndef has_application_server(once: bool = False) -> bool:\n if once:\n return os.path.exists(\"/etc/supervisor/conf.d/zulip/zulip-once.conf\")\n return (\n # Current path\n os.path.exists(\"/etc/supervisor/conf.d/zulip/zulip.conf\")\n # Old path, relevant for upgrades\n or os.path.exists(\"/etc/supervisor/conf.d/zulip.conf\")\n )\n\n\ndef 
list_supervisor_processes(*args: str) -> List[str]:\n worker_status = subprocess.run(\n [\"supervisorctl\", \"status\", *args],\n universal_newlines=True,\n stdout=subprocess.PIPE,\n )\n # `supervisorctl status` returns 3 if any are stopped, which is\n # fine here; and exit code 4 is for no such process, which is\n # handled below.\n if worker_status.returncode not in (0, 3, 4):\n worker_status.check_returncode()\n\n processes = []\n for status_line in worker_status.stdout.splitlines():\n if not re.search(r\"ERROR \\(no such (process|group)\\)\", status_line):\n processes.append(status_line.split()[0])\n return processes\n\n\ndef has_process_fts_updates() -> bool:\n return (\n # Current path\n os.path.exists(\"/etc/supervisor/conf.d/zulip/zulip_db.conf\")\n # Old path, relevant for upgrades\n or os.path.exists(\"/etc/supervisor/conf.d/zulip_db.conf\")\n )\n\n\ndef deport(netloc: str) -> str:\n \"\"\"Remove the port from a hostname:port string. Brackets on a literal\n IPv6 address are included.\"\"\"\n r = SplitResult(\"\", netloc, \"\", \"\", \"\")\n assert r.hostname is not None\n return \"[\" + r.hostname + \"]\" if \":\" in r.hostname else r.hostname\n\n\ndef start_arg_parser(action: str, add_help: bool = False) -> argparse.ArgumentParser:\n parser = argparse.ArgumentParser(add_help=add_help)\n parser.add_argument(\"--fill-cache\", action=\"store_true\", help=\"Fill the memcached caches\")\n if action == \"restart\":\n parser.add_argument(\n \"--less-graceful\",\n action=\"store_true\",\n help=\"Restart with more concern for expediency than minimizing availability interruption\",\n )\n parser.add_argument(\n \"--skip-tornado\",\n action=\"store_true\",\n help=\"Do not restart Tornado processes\",\n )\n return parser\n\n\nif __name__ == \"__main__\":\n cmd = sys.argv[1]\n if cmd == \"make_deploy_path\":\n print(make_deploy_path())\n elif cmd == \"get_dev_uuid\":\n print(get_dev_uuid_var_path())\n",
"path": "scripts/lib/zulip_tools.py"
}
] | diff --git a/docs/production/deployment.md b/docs/production/deployment.md
index e7f5cf678f78b..4b8a782a4d4af 100644
--- a/docs/production/deployment.md
+++ b/docs/production/deployment.md
@@ -623,6 +623,14 @@ override is useful both Docker systems (where the above algorithm
might see the host's memory, not the container's) and/or when using
remote servers for postgres, memcached, redis, and RabbitMQ.
+#### `rolling_restart`
+
+If set to a non-empty value, when using `./scripts/restart-server` to
+restart Zulip, restart the uwsgi processes one-at-a-time, instead of
+all at once. This decreases the number of 502's served to clients, at
+the cost of slightly increased memory usage, and the possibility that
+different requests will be served by different versions of the code.
+
#### `uwsgi_buffer_size`
Override the default uwsgi buffer size of 8192.
diff --git a/puppet/zulip/manifests/app_frontend_base.pp b/puppet/zulip/manifests/app_frontend_base.pp
index 011763aae0c01..fade71baf5e90 100644
--- a/puppet/zulip/manifests/app_frontend_base.pp
+++ b/puppet/zulip/manifests/app_frontend_base.pp
@@ -119,6 +119,12 @@
notify => Service[$zulip::common::supervisor_service],
}
+ $uwsgi_rolling_restart = zulipconf('application_server', 'rolling_restart', '')
+ if $uwsgi_rolling_restart == '' {
+ file { '/home/zulip/deployments/uwsgi-control':
+ ensure => absent,
+ }
+ }
$uwsgi_listen_backlog_limit = zulipconf('application_server', 'uwsgi_listen_backlog_limit', 128)
$uwsgi_buffer_size = zulipconf('application_server', 'uwsgi_buffer_size', 8192)
$uwsgi_processes = zulipconf('application_server', 'uwsgi_processes', $uwsgi_default_processes)
diff --git a/puppet/zulip/templates/uwsgi.ini.template.erb b/puppet/zulip/templates/uwsgi.ini.template.erb
index 6d72b2b9c48cf..c43d66ec350cb 100644
--- a/puppet/zulip/templates/uwsgi.ini.template.erb
+++ b/puppet/zulip/templates/uwsgi.ini.template.erb
@@ -16,6 +16,13 @@ gid=zulip
stats=/home/zulip/deployments/uwsgi-stats
+<% if @uwsgi_rolling_restart != '' -%>
+master-fifo=/home/zulip/deployments/uwsgi-control
+# lazy-apps are required for rolling restarts:
+# https://uwsgi-docs.readthedocs.io/en/latest/articles/TheArtOfGracefulReloading.html#preforking-vs-lazy-apps-vs-lazy
+lazy-apps=true
+<% end -%>
+
ignore-sigpipe = true
ignore-write-errors = true
disable-write-exception = true
diff --git a/scripts/lib/zulip_tools.py b/scripts/lib/zulip_tools.py
index f4669cc110bd9..862e1ff4c1efb 100755
--- a/scripts/lib/zulip_tools.py
+++ b/scripts/lib/zulip_tools.py
@@ -623,7 +623,7 @@ def list_supervisor_processes(*args: str) -> List[str]:
universal_newlines=True,
stdout=subprocess.PIPE,
)
- # `supercisorctl status` returns 3 if any are stopped, which is
+ # `supervisorctl status` returns 3 if any are stopped, which is
# fine here; and exit code 4 is for no such process, which is
# handled below.
if worker_status.returncode not in (0, 3, 4):
diff --git a/scripts/restart-server b/scripts/restart-server
index a8bb94780a6e1..6923988a6676d 100755
--- a/scripts/restart-server
+++ b/scripts/restart-server
@@ -13,6 +13,7 @@ from scripts.lib.zulip_tools import (
ENDC,
OKGREEN,
WARNING,
+ get_config,
get_config_file,
get_tornado_ports,
has_application_server,
@@ -128,8 +129,29 @@ if has_application_server():
subprocess.check_call(["supervisorctl", action, "zulip-tornado:*"])
# Finally, restart the Django uWSGI processes.
- logging.info("%s django server", verbing)
- subprocess.check_call(["supervisorctl", action, "zulip-django"])
+ if (
+ action == "restart"
+ and not args.less_graceful
+ and get_config(config_file, "application_server", "rolling_restart") != ""
+ and os.path.exists("/home/zulip/deployments/uwsgi-control")
+ ):
+ # See if it's currently running
+ uwsgi_status = subprocess.run(
+ ["supervisorctl", "status", "zulip-django"],
+ stdout=subprocess.DEVNULL,
+ )
+ if uwsgi_status.returncode == 0:
+ logging.info("Starting rolling restart of django server")
+ with open("/home/zulip/deployments/uwsgi-control", "w") as control_socket:
+ # "c" is chain-reloading:
+ # https://uwsgi-docs.readthedocs.io/en/latest/MasterFIFO.html#available-commands
+ control_socket.write("c")
+ else:
+ logging.info("Starting django server")
+ subprocess.check_call(["supervisorctl", "start", "zulip-django"])
+ else:
+ logging.info("%s django server", verbing)
+ subprocess.check_call(["supervisorctl", action, "zulip-django"])
using_sso = subprocess.check_output(["./scripts/get-django-setting", "USING_APACHE_SSO"])
if using_sso.strip() == b"True":
| Support restarting the server without rejecting any requests
In theory, uwsgi with its `master=true` setting should make it possible to restart the server with zero requests rejected due to the service being down (the obvious approach being to queue incoming requests in the socket until the new processes are up). I tried this briefly with our supervisord setup and ran into problems where it would simply fail to restart, so some investigation is required into how to do this properly.
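
For reference, here is a minimal sketch of the chain-reloading approach the diff above lands on, assuming the `master-fifo` path configured in the uwsgi template; the function and flag names are illustrative, not the actual `scripts/restart-server` code:

```python
# Sketch only: mirrors the idea in the PR above, not the shipped script.
import os
import subprocess

# Path that the uwsgi template in the diff configures as master-fifo.
UWSGI_CONTROL_FIFO = "/home/zulip/deployments/uwsgi-control"


def restart_django(less_graceful: bool = False) -> None:
    if not less_graceful and os.path.exists(UWSGI_CONTROL_FIFO):
        # Writing "c" asks the uwsgi master to chain-reload workers one at a
        # time, so the listen socket keeps accepting requests during the restart:
        # https://uwsgi-docs.readthedocs.io/en/latest/MasterFIFO.html#available-commands
        with open(UWSGI_CONTROL_FIFO, "w") as control_socket:
            control_socket.write("c")
    else:
        # Fall back to an all-at-once restart; clients may see 502s while
        # the workers come back up.
        subprocess.check_call(["supervisorctl", "restart", "zulip-django"])
```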
|
OCHA-DAP__hdx-ckan-1798 | [
{
"content": "'''\nCreated on Nov 3, 2014\n\n@author: alexandru-m-g\n'''\n\nimport logging\nimport datetime as dt\nimport decimal\n\nimport pylons.config as config\n\nimport ckan.lib.base as base\nimport ckan.logic as logic\nimport ckan.model as model\nimport ckan.common as common\nimport ckan.lib.helpers as h\n\nrender = base.render\nget_action = logic.get_action\nc = common.c\nrequest = common.request\n_ = common._\n\nDecimal = decimal.Decimal\n\nlog = logging.getLogger(__name__)\n\n\nclass CrisisController(base.BaseController):\n\n def show(self):\n\n context = {'model': model, 'session': model.Session,\n 'user': c.user or c.author, 'for_view': True,\n 'auth_user_obj': c.userobj}\n\n datastore_resource_id = self._get_datastore_resource_id(\n context, config.get('hdx.crisis.ebola_dataset', None), config.get('hdx.crisis.ebola_resource_title', None))\n if datastore_resource_id:\n c.top_line_items = self._get_top_line_items(\n context, datastore_resource_id)\n\n limit = 25\n c.q = u'ebola'\n\n page = int(request.params.get('page', 1))\n data_dict = {'sort': u'metadata_modified desc',\n 'fq': '+dataset_type:dataset',\n 'rows': limit,\n 'q': c.q,\n 'start': (page - 1) * limit\n }\n query = get_action(\"package_search\")(context, data_dict)\n\n def pager_url(q=None, page=None):\n return h.url_for('show_crisis', page=page)\n\n c.page = h.Page(\n collection=query['results'],\n page=page,\n url=pager_url,\n item_count=query['count'],\n items_per_page=limit\n )\n c.items = query['results']\n c.item_count = query['count']\n\n c.other_links = {}\n c.other_links['show_more'] = h.url_for(\n \"search\", **{'q': u'ebola', 'sort': u'metadata_modified desc',\n 'ext_indicator': '0'})\n\n return render('crisis/crisis.html')\n\n def _get_decimal_value(self, value):\n decimal_value = Decimal(str(value)).quantize(\n Decimal('.1'), rounding=decimal.ROUND_HALF_UP)\n return decimal_value\n\n def _format_results(self, result):\n for r in result['records']:\n d = dt.datetime.strptime(r[u'latest_date'], '%Y-%m-%dT%H:%M:%S')\n r[u'latest_date'] = dt.datetime.strftime(d, '%b %d, %Y')\n\n modified_value = r[u'value']\n if r[u'units'] == 'ratio':\n modified_value *= 100.0\n elif r[u'units'] == 'million':\n modified_value /= 1000000.0\n\n int_value = int(modified_value)\n if int_value == modified_value:\n r[u'formatted_value'] = '{:,}'.format(int_value)\n else:\n if r[u'units'] == 'ratio':\n r[u'formatted_value'] = '{:,.1f}'.format(\n self._get_decimal_value(modified_value))\n elif r[u'units'] == 'million':\n r[u'formatted_value'] = '{:,.1f}'.format(\n self._get_decimal_value(modified_value))\n #r[u'formatted_value'] += ' ' + _('million')\n\n def _get_top_line_items(self, context, datastore_resource_id):\n modified_context = dict(context)\n modified_context['ignore_auth'] = True\n result = get_action('datastore_search')(\n modified_context, {'resource_id': datastore_resource_id})\n if 'records' in result:\n self._format_results(result)\n return result['records']\n return []\n\n def _get_datastore_resource_id(self, context, dataset_id, resource_name):\n try:\n modified_context = dict(context)\n modified_context['ignore_auth'] = True\n dataset = get_action('package_show')(\n modified_context, {'id': dataset_id})\n\n if 'resources' in dataset:\n for r in dataset['resources']:\n if 'datastore_active' in r and r['datastore_active'] \\\n and r['name'] == resource_name:\n return r['id']\n return None\n except:\n log.warning('No dataset with id ' + dataset_id)\n return None\n",
"path": "ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py"
}
] | [
{
"content": "'''\nCreated on Nov 3, 2014\n\n@author: alexandru-m-g\n'''\n\nimport logging\nimport datetime as dt\nimport decimal\n\nimport pylons.config as config\n\nimport ckan.lib.base as base\nimport ckan.logic as logic\nimport ckan.model as model\nimport ckan.common as common\nimport ckan.lib.helpers as h\n\nrender = base.render\nget_action = logic.get_action\nc = common.c\nrequest = common.request\n_ = common._\n\nDecimal = decimal.Decimal\n\nlog = logging.getLogger(__name__)\n\n\nclass CrisisController(base.BaseController):\n\n def show(self):\n\n context = {'model': model, 'session': model.Session,\n 'user': c.user or c.author, 'for_view': True,\n 'auth_user_obj': c.userobj}\n\n datastore_resource_id = self._get_datastore_resource_id(\n context, config.get('hdx.crisis.ebola_dataset', None), config.get('hdx.crisis.ebola_resource_title', None))\n if datastore_resource_id:\n c.top_line_items = self._get_top_line_items(\n context, datastore_resource_id)\n\n limit = 25\n c.q = u'ebola'\n\n page = int(request.params.get('page', 1))\n data_dict = {'sort': u'metadata_modified desc',\n 'fq': '+dataset_type:dataset',\n 'rows': limit,\n 'q': c.q,\n 'start': (page - 1) * limit\n }\n query = get_action(\"package_search\")(context, data_dict)\n\n def pager_url(q=None, page=None):\n url = h.url_for('show_crisis', page=page) + '#datasets-section'\n return url\n\n c.page = h.Page(\n collection=query['results'],\n page=page,\n url=pager_url,\n item_count=query['count'],\n items_per_page=limit\n )\n c.items = query['results']\n c.item_count = query['count']\n\n c.other_links = {}\n c.other_links['show_more'] = h.url_for(\n \"search\", **{'q': u'ebola', 'sort': u'metadata_modified desc',\n 'ext_indicator': '0'})\n\n return render('crisis/crisis.html')\n\n def _get_decimal_value(self, value):\n decimal_value = Decimal(str(value)).quantize(\n Decimal('.1'), rounding=decimal.ROUND_HALF_UP)\n return decimal_value\n\n def _format_results(self, result):\n for r in result['records']:\n d = dt.datetime.strptime(r[u'latest_date'], '%Y-%m-%dT%H:%M:%S')\n r[u'latest_date'] = dt.datetime.strftime(d, '%b %d, %Y')\n\n modified_value = r[u'value']\n if r[u'units'] == 'ratio':\n modified_value *= 100.0\n elif r[u'units'] == 'million':\n modified_value /= 1000000.0\n\n int_value = int(modified_value)\n if int_value == modified_value:\n r[u'formatted_value'] = '{:,}'.format(int_value)\n else:\n if r[u'units'] == 'ratio':\n r[u'formatted_value'] = '{:,.1f}'.format(\n self._get_decimal_value(modified_value))\n elif r[u'units'] == 'million':\n r[u'formatted_value'] = '{:,.1f}'.format(\n self._get_decimal_value(modified_value))\n #r[u'formatted_value'] += ' ' + _('million')\n\n def _get_top_line_items(self, context, datastore_resource_id):\n modified_context = dict(context)\n modified_context['ignore_auth'] = True\n result = get_action('datastore_search')(\n modified_context, {'resource_id': datastore_resource_id})\n if 'records' in result:\n self._format_results(result)\n return result['records']\n return []\n\n def _get_datastore_resource_id(self, context, dataset_id, resource_name):\n try:\n modified_context = dict(context)\n modified_context['ignore_auth'] = True\n dataset = get_action('package_show')(\n modified_context, {'id': dataset_id})\n\n if 'resources' in dataset:\n for r in dataset['resources']:\n if 'datastore_active' in r and r['datastore_active'] \\\n and r['name'] == resource_name:\n return r['id']\n return None\n except:\n log.warning('No dataset with id ' + dataset_id)\n return None\n",
"path": "ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py"
}
] | diff --git a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py
index 6ebe4ce63a..2c261a6c48 100644
--- a/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py
+++ b/ckanext-hdx_crisis/ckanext/hdx_crisis/controllers/crisis_controller.py
@@ -54,7 +54,8 @@ def show(self):
query = get_action("package_search")(context, data_dict)
def pager_url(q=None, page=None):
- return h.url_for('show_crisis', page=page)
+ url = h.url_for('show_crisis', page=page) + '#datasets-section'
+ return url
c.page = h.Page(
collection=query['results'],
diff --git a/ckanext-hdx_theme/ckanext/hdx_theme/templates/crisis/crisis.html b/ckanext-hdx_theme/ckanext/hdx_theme/templates/crisis/crisis.html
index db5baaa159..5df8ded91c 100644
--- a/ckanext-hdx_theme/ckanext/hdx_theme/templates/crisis/crisis.html
+++ b/ckanext-hdx_theme/ckanext/hdx_theme/templates/crisis/crisis.html
@@ -110,7 +110,7 @@
Datasets [{{c.item_count}}]
</span>
<span class="mL15 list-header-showall">
- <a href="{{ c.other_links.show_more }}">Show all datasets</a>
+ <a id="datasets-section" href="{{ c.other_links.show_more }}">Show all datasets</a>
</span>
</div>
| Ebola page: loading second page of datasets reloads to top of page
Would it be easy to have it load the page at the `Datasets [41]` line?
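
A minimal sketch of one way to do that, which is essentially what the diff above does: point the pager links at a URL fragment whose id is rendered next to the datasets heading, so the browser scrolls there after the page loads. The helper import matches the controller above, and `#datasets-section` is the id added to `crisis.html` in the diff.

```python
import ckan.lib.helpers as h


def pager_url(q=None, page=None):
    # The fragment must match an element id in the template, e.g. the
    # <a id="datasets-section" ...> anchor added to crisis.html by the diff.
    return h.url_for('show_crisis', page=page) + '#datasets-section'
```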
|
plone__Products.CMFPlone-3529 | [
{
"content": "from AccessControl import getSecurityManager\nfrom AccessControl.Permissions import view as View\nfrom OFS.interfaces import IApplication\nfrom Products.CMFCore.permissions import ManagePortal\nfrom Products.CMFPlone.factory import _DEFAULT_PROFILE\nfrom Products.CMFPlone.factory import addPloneSite\nfrom plone.base.interfaces import INonInstallable\nfrom plone.base.interfaces import IPloneSiteRoot\nfrom Products.CMFPlone.utils import get_installer\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom Products.GenericSetup import BASE, EXTENSION\nfrom Products.GenericSetup import profile_registry\nfrom Products.GenericSetup.upgrade import normalize_version\nfrom ZPublisher.BaseRequest import DefaultPublishTraverse\nfrom collections import OrderedDict\nfrom plone.i18n.locales.interfaces import IContentLanguageAvailability\nfrom plone.keyring.interfaces import IKeyManager\nfrom plone.protect.authenticator import check as checkCSRF\nfrom plone.protect.interfaces import IDisableCSRFProtection\nfrom urllib import parse\nfrom ZODB.broken import Broken\nfrom zope.component import adapts\nfrom zope.component import getAllUtilitiesRegisteredFor\nfrom zope.component import getUtility\nfrom zope.component import queryMultiAdapter\nfrom zope.component import queryUtility\nfrom zope.i18n.interfaces import IUserPreferredLanguages\nfrom zope.i18n.locales import locales, LoadLocaleError\nfrom zope.interface import Interface\nfrom zope.interface import alsoProvides\nfrom zope.publisher.browser import BrowserView\nfrom zope.publisher.interfaces import IRequest\nfrom zope.schema.interfaces import IVocabularyFactory\n\nimport logging\nimport pkg_resources\n\n\ntry:\n pkg_resources.get_distribution(\"plone.volto\")\n HAS_VOLTO = True\nexcept pkg_resources.DistributionNotFound:\n HAS_VOLTO = False\nLOGGER = logging.getLogger('Products.CMFPlone')\n\n\nclass AppTraverser(DefaultPublishTraverse):\n adapts(IApplication, IRequest)\n\n def publishTraverse(self, request, name):\n if name == 'index_html':\n view = queryMultiAdapter(\n (self.context, request), Interface, 'plone-overview')\n if view is not None:\n return view\n return DefaultPublishTraverse.publishTraverse(self, request, name)\n\n\nclass Overview(BrowserView):\n has_volto = HAS_VOLTO\n\n def sites(self, root=None):\n if root is None:\n root = self.context\n\n result = []\n secman = getSecurityManager()\n candidates = (\n obj for obj in root.values() if not isinstance(obj, Broken)\n )\n for obj in candidates:\n if obj.meta_type == 'Folder':\n result = result + self.sites(obj)\n elif IPloneSiteRoot.providedBy(obj):\n if secman.checkPermission(View, obj):\n result.append(obj)\n elif obj.getId() in getattr(root, '_mount_points', {}):\n result.extend(self.sites(root=obj))\n return result\n\n def outdated(self, obj):\n # Try to pick the portal_migration as an attribute\n # (Plone 5 unmigrated site root) or as an item\n mig = (\n getattr(obj, \"portal_migration\", None)\n or obj.get('portal_migration', None)\n )\n if mig is not None:\n return mig.needUpgrading()\n return False\n\n def can_manage(self):\n secman = getSecurityManager()\n return secman.checkPermission(ManagePortal, self.context)\n\n def upgrade_url(self, site, can_manage=None):\n if can_manage is None:\n can_manage = self.can_manage()\n if can_manage:\n return site.absolute_url() + '/@@plone-upgrade'\n else:\n return self.context.absolute_url() + '/@@plone-root-login'\n\n\nclass RootLoginRedirect(BrowserView):\n \"\"\" @@plone-root-login\n\n This view of the 
Zope root forces authentication via the root\n acl_users and then redirects elsewhere.\n \"\"\"\n\n def __call__(self, came_from=None):\n if came_from is not None:\n # see if this is a relative url or an absolute\n if len(parse.urlparse(came_from)[1]) == 0:\n # No host specified, so url is relative. Get an absolute url.\n # Note: '\\\\domain.org' is not recognised as host,\n # which is good.\n came_from = parse.urljoin(\n self.context.absolute_url() + '/', came_from,\n )\n elif not came_from.startswith(self.context.absolute_url()):\n # Note: we cannot use portal_url.isURLInPortal here, because we\n # are not in a Plone portal, but in the Zope root.\n came_from = None\n if came_from is None:\n came_from = self.context.absolute_url()\n self.request.response.redirect(came_from)\n\n\nclass RootLogout(BrowserView):\n \"\"\" @@plone-root-logout \"\"\"\n\n logout = ViewPageTemplateFile('templates/plone-admin-logged-out.pt')\n\n def __call__(self):\n response = self.request.response\n realm = response.realm\n response.setStatus(401)\n response.setHeader('WWW-Authenticate', 'basic realm=\"%s\"' % realm, 1)\n response.setBody(self.logout())\n return\n\n\nclass FrontPage(BrowserView):\n\n index = ViewPageTemplateFile('templates/plone-frontpage.pt')\n\n\nclass AddPloneSite(BrowserView):\n\n # Profiles that are installed by default,\n # but can be removed later.\n default_extension_profiles = (\n 'plone.app.caching:default',\n 'plonetheme.barceloneta:default',\n )\n # Let's have a separate list for Volto.\n volto_default_extension_profiles = (\n 'plone.app.caching:default',\n # We could choose to not install Barceloneta:\n 'plonetheme.barceloneta:default',\n 'plone.volto:default',\n 'plone.volto:default-homepage'\n )\n\n def profiles(self):\n base_profiles = []\n extension_profiles = []\n if HAS_VOLTO and not self.request.get('classic'):\n selected_extension_profiles = self.volto_default_extension_profiles\n else:\n selected_extension_profiles = self.default_extension_profiles\n\n # profiles available for install/uninstall, but hidden at the time\n # the Plone site is created\n not_installable = [\n 'Products.CMFPlacefulWorkflow:CMFPlacefulWorkflow',\n ]\n utils = getAllUtilitiesRegisteredFor(INonInstallable)\n for util in utils:\n not_installable.extend(util.getNonInstallableProfiles())\n\n for info in profile_registry.listProfileInfo():\n if info.get('type') == EXTENSION and \\\n info.get('for') in (IPloneSiteRoot, None):\n profile_id = info.get('id')\n if profile_id not in not_installable:\n if profile_id in selected_extension_profiles:\n info['selected'] = 'selected'\n extension_profiles.append(info)\n\n def _key(v):\n # Make sure implicitly selected items come first\n selected = v.get('selected') and 'automatic' or 'manual'\n return '{}-{}'.format(selected, v.get('title', ''))\n extension_profiles.sort(key=_key)\n\n for info in profile_registry.listProfileInfo():\n if info.get('type') == BASE and \\\n info.get('for') in (IPloneSiteRoot, None):\n base_profiles.append(info)\n\n return dict(\n base=tuple(base_profiles),\n default=_DEFAULT_PROFILE,\n extensions=tuple(extension_profiles),\n )\n\n def browser_language(self):\n language = 'en'\n pl = IUserPreferredLanguages(self.request)\n if pl is not None:\n languages = pl.getPreferredLanguages()\n for httplang in languages:\n parts = (httplang.split('-') + [None, None])[:3]\n if parts[0] == parts[1]:\n # Avoid creating a country code for simple languages codes\n parts = [parts[0], None, None]\n try:\n locale = locales.getLocale(*parts)\n language = 
locale.getLocaleID().replace('_', '-').lower()\n break\n except LoadLocaleError:\n # Just try the next combination\n pass\n return language\n\n def grouped_languages(self, default='en'):\n util = queryUtility(IContentLanguageAvailability)\n available = util.getLanguages(combined=True)\n languages = dict(util.getLanguageListing())\n\n # Group country specific versions by language\n grouped = OrderedDict()\n for langcode, data in available.items():\n lang = langcode.split('-')[0]\n language = languages.get(lang, lang) # Label\n\n struct = grouped.get(lang, {'label': language, 'languages': []})\n\n langs = struct['languages']\n langs.append({\n 'langcode': langcode,\n 'label': data.get('native', data.get('name')),\n })\n\n grouped[lang] = struct\n\n # Sort list by language, next by country\n data = sorted(grouped.values(), key=lambda k: k['label'])\n for item in data:\n item['languages'] = sorted(\n item['languages'], key=lambda k: k['label'].lower())\n return data\n\n def timezones(self):\n tz_vocab = getUtility(\n IVocabularyFactory,\n 'plone.app.vocabularies.CommonTimezones'\n )(self.context)\n\n grouped = OrderedDict()\n tz_values = [it.value for it in tz_vocab]\n for value in tz_values:\n splitted = value.split('/')\n group = splitted.pop(0)\n label = '/'.join(splitted)\n\n entries = grouped.get(group, [])\n entries.append({'label': label or group, 'value': value})\n grouped[group] = entries\n\n return grouped\n\n def __call__(self):\n context = self.context\n form = self.request.form\n submitted = form.get('form.submitted', False)\n if submitted:\n site_id = form.get('site_id', 'Plone')\n\n # CSRF protect. DO NOT use auto CSRF protection for adding a site\n alsoProvides(self.request, IDisableCSRFProtection)\n\n # check if keyring is installed on root, disable CSRF protection\n # if it is because it is not installed until a plone site\n # is created\n if queryUtility(IKeyManager) is None:\n LOGGER.info('CSRF protection disabled on initial site '\n 'creation')\n else:\n # we have a keymanager, check csrf protection manually now\n checkCSRF(self.request)\n site = addPloneSite(\n context, site_id,\n title=form.get('title', ''),\n profile_id=form.get('profile_id', _DEFAULT_PROFILE),\n extension_ids=form.get('extension_ids', ()),\n setup_content=form.get('setup_content', False),\n default_language=form.get('default_language', 'en'),\n portal_timezone=form.get('portal_timezone', 'UTC')\n )\n self.request.response.redirect(site.absolute_url())\n return ''\n\n return self.index()\n\n\nclass Upgrade(BrowserView):\n\n def upgrades(self):\n pm = getattr(self.context, 'portal_migration')\n return pm.listUpgrades()\n\n def versions(self):\n pm = getattr(self.context, 'portal_migration')\n result = {}\n result['instance'] = pm.getInstanceVersion()\n result['fs'] = pm.getFileSystemVersion()\n result['equal'] = result['instance'] == result['fs']\n instance_version = normalize_version(result['instance'])\n fs_version = normalize_version(result['fs'])\n result['instance_gt'] = instance_version > fs_version\n result['instance_lt'] = instance_version < fs_version\n result['corelist'] = pm.coreVersions()\n return result\n\n def __call__(self):\n form = self.request.form\n submitted = form.get('form.submitted', False)\n if submitted:\n # CSRF protect. 
DO NOT use auto CSRF protection for upgrading sites\n alsoProvides(self.request, IDisableCSRFProtection)\n\n pm = getattr(self.context, 'portal_migration')\n report = pm.upgrade(\n REQUEST=self.request,\n dry_run=form.get('dry_run', False),\n )\n return self.index(\n report=report,\n )\n\n return self.index()\n",
"path": "Products/CMFPlone/browser/admin.py"
}
] | [
{
"content": "from AccessControl import getSecurityManager\nfrom AccessControl.Permissions import view as View\nfrom OFS.interfaces import IApplication\nfrom Products.CMFCore.permissions import ManagePortal\nfrom Products.CMFPlone.factory import _DEFAULT_PROFILE\nfrom Products.CMFPlone.factory import addPloneSite\nfrom plone.base.interfaces import INonInstallable\nfrom plone.base.interfaces import IPloneSiteRoot\nfrom Products.CMFPlone.utils import get_installer\nfrom Products.Five.browser.pagetemplatefile import ViewPageTemplateFile\nfrom Products.GenericSetup import BASE, EXTENSION\nfrom Products.GenericSetup import profile_registry\nfrom Products.GenericSetup.upgrade import normalize_version\nfrom ZPublisher.BaseRequest import DefaultPublishTraverse\nfrom collections import OrderedDict\nfrom plone.i18n.locales.interfaces import IContentLanguageAvailability\nfrom plone.keyring.interfaces import IKeyManager\nfrom plone.protect.authenticator import check as checkCSRF\nfrom plone.protect.interfaces import IDisableCSRFProtection\nfrom urllib import parse\nfrom ZODB.broken import Broken\nfrom zope.component import adapts\nfrom zope.component import getAllUtilitiesRegisteredFor\nfrom zope.component import getUtility\nfrom zope.component import queryMultiAdapter\nfrom zope.component import queryUtility\nfrom zope.i18n.interfaces import IUserPreferredLanguages\nfrom zope.i18n.locales import locales, LoadLocaleError\nfrom zope.interface import Interface\nfrom zope.interface import alsoProvides\nfrom zope.publisher.browser import BrowserView\nfrom zope.publisher.interfaces import IRequest\nfrom zope.schema.interfaces import IVocabularyFactory\n\nimport logging\nimport pkg_resources\n\n\ntry:\n pkg_resources.get_distribution(\"plone.volto\")\n HAS_VOLTO = True\nexcept pkg_resources.DistributionNotFound:\n HAS_VOLTO = False\nLOGGER = logging.getLogger('Products.CMFPlone')\n\n\nclass AppTraverser(DefaultPublishTraverse):\n adapts(IApplication, IRequest)\n\n def publishTraverse(self, request, name):\n if name == 'index_html':\n view = queryMultiAdapter(\n (self.context, request), Interface, 'plone-overview')\n if view is not None:\n return view\n return DefaultPublishTraverse.publishTraverse(self, request, name)\n\n\nclass Overview(BrowserView):\n has_volto = HAS_VOLTO\n\n def sites(self, root=None):\n if root is None:\n root = self.context\n\n result = []\n secman = getSecurityManager()\n candidates = (\n obj for obj in root.values() if not isinstance(obj, Broken)\n )\n for obj in candidates:\n if obj.meta_type == 'Folder':\n result = result + self.sites(obj)\n elif IPloneSiteRoot.providedBy(obj):\n if secman.checkPermission(View, obj):\n result.append(obj)\n elif obj.getId() in getattr(root, '_mount_points', {}):\n result.extend(self.sites(root=obj))\n return result\n\n def outdated(self, obj):\n # Try to pick the portal_migration as an attribute\n # (Plone 5 unmigrated site root) or as an item\n mig = (\n getattr(obj, \"portal_migration\", None)\n or obj.get('portal_migration', None)\n )\n if mig is not None:\n return mig.needUpgrading()\n return False\n\n def can_manage(self):\n secman = getSecurityManager()\n return secman.checkPermission(ManagePortal, self.context)\n\n def upgrade_url(self, site, can_manage=None):\n if can_manage is None:\n can_manage = self.can_manage()\n if can_manage:\n return site.absolute_url() + '/@@plone-upgrade'\n else:\n return self.context.absolute_url() + '/@@plone-root-login'\n\n\nclass RootLoginRedirect(BrowserView):\n \"\"\" @@plone-root-login\n\n This view of the 
Zope root forces authentication via the root\n acl_users and then redirects elsewhere.\n \"\"\"\n\n def __call__(self, came_from=None):\n if came_from is not None:\n # see if this is a relative url or an absolute\n if len(parse.urlparse(came_from)[1]) == 0:\n # No host specified, so url is relative. Get an absolute url.\n # Note: '\\\\domain.org' is not recognised as host,\n # which is good.\n came_from = parse.urljoin(\n self.context.absolute_url() + '/', came_from,\n )\n elif not came_from.startswith(self.context.absolute_url()):\n # Note: we cannot use portal_url.isURLInPortal here, because we\n # are not in a Plone portal, but in the Zope root.\n came_from = None\n if came_from is None:\n came_from = self.context.absolute_url()\n self.request.response.redirect(came_from)\n\n\nclass RootLogout(BrowserView):\n \"\"\" @@plone-root-logout \"\"\"\n\n logout = ViewPageTemplateFile('templates/plone-admin-logged-out.pt')\n\n def __call__(self):\n response = self.request.response\n realm = response.realm\n response.setStatus(401)\n response.setHeader('WWW-Authenticate', 'basic realm=\"%s\"' % realm, 1)\n response.setBody(self.logout())\n return\n\n\nclass FrontPage(BrowserView):\n\n index = ViewPageTemplateFile('templates/plone-frontpage.pt')\n\n\nclass AddPloneSite(BrowserView):\n\n # Profiles that are installed by default,\n # but can be removed later.\n default_extension_profiles = (\n 'plone.app.caching:default',\n 'plonetheme.barceloneta:default',\n )\n # Let's have a separate list for Volto.\n volto_default_extension_profiles = (\n 'plone.app.caching:default',\n # We could choose to not install Barceloneta:\n 'plonetheme.barceloneta:default',\n 'plone.volto:default',\n 'plone.volto:default-homepage'\n )\n\n def profiles(self):\n base_profiles = []\n extension_profiles = []\n if HAS_VOLTO and not self.request.get('classic'):\n selected_extension_profiles = self.volto_default_extension_profiles\n else:\n selected_extension_profiles = self.default_extension_profiles\n\n # profiles available for install/uninstall, but hidden at the time\n # the Plone site is created\n not_installable = [\n 'Products.CMFPlacefulWorkflow:CMFPlacefulWorkflow',\n ]\n utils = getAllUtilitiesRegisteredFor(INonInstallable)\n for util in utils:\n not_installable.extend(util.getNonInstallableProfiles())\n\n for info in profile_registry.listProfileInfo():\n if info.get('type') == EXTENSION and \\\n info.get('for') in (IPloneSiteRoot, None):\n profile_id = info.get('id')\n if profile_id not in not_installable:\n if profile_id in selected_extension_profiles:\n info['selected'] = 'selected'\n extension_profiles.append(info)\n\n def _key(v):\n # Make sure implicitly selected items come first\n selected = v.get('selected') and 'automatic' or 'manual'\n return '{}-{}'.format(selected, v.get('title', ''))\n extension_profiles.sort(key=_key)\n\n for info in profile_registry.listProfileInfo():\n if info.get('type') == BASE and \\\n info.get('for') in (IPloneSiteRoot, None):\n base_profiles.append(info)\n\n return dict(\n base=tuple(base_profiles),\n default=_DEFAULT_PROFILE,\n extensions=tuple(extension_profiles),\n )\n\n def browser_language(self):\n language = 'en'\n pl = IUserPreferredLanguages(self.request)\n if pl is not None:\n languages = pl.getPreferredLanguages()\n for httplang in languages:\n parts = (httplang.split('-') + [None, None])[:3]\n if parts[0] == parts[1]:\n # Avoid creating a country code for simple languages codes\n parts = [parts[0], None, None]\n try:\n locale = locales.getLocale(*parts)\n language = 
locale.getLocaleID().replace('_', '-').lower()\n break\n except LoadLocaleError:\n # Just try the next combination\n pass\n return language\n\n def grouped_languages(self, default='en'):\n util = queryUtility(IContentLanguageAvailability)\n available = util.getLanguages(combined=True)\n languages = dict(util.getLanguageListing())\n\n # Group country specific versions by language\n grouped = OrderedDict()\n for langcode, data in available.items():\n lang = langcode.split('-')[0]\n language = languages.get(lang, lang) # Label\n\n struct = grouped.get(lang, {'label': language, 'languages': []})\n\n langs = struct['languages']\n langs.append({\n 'langcode': langcode,\n 'label': data.get('native', data.get('name')),\n })\n\n grouped[lang] = struct\n\n # Sort list by language, next by country\n data = sorted(grouped.values(), key=lambda k: k['label'])\n for item in data:\n item['languages'] = sorted(\n item['languages'], key=lambda k: k['label'].lower())\n return data\n\n def timezones(self):\n tz_vocab = getUtility(\n IVocabularyFactory,\n 'plone.app.vocabularies.CommonTimezones'\n )(self.context)\n\n grouped = OrderedDict()\n tz_values = [it.value for it in tz_vocab]\n for value in tz_values:\n splitted = value.split('/')\n group = splitted.pop(0)\n label = '/'.join(splitted)\n\n entries = grouped.get(group, [])\n entries.append({'label': label or group, 'value': value})\n grouped[group] = entries\n\n return grouped\n\n def __call__(self):\n context = self.context\n form = self.request.form\n submitted = form.get('form.submitted', False)\n if submitted:\n site_id = form.get('site_id', 'Plone')\n\n # CSRF protect. DO NOT use auto CSRF protection for adding a site\n alsoProvides(self.request, IDisableCSRFProtection)\n\n # check if keyring is installed on root, disable CSRF protection\n # if it is because it is not installed until a plone site\n # is created\n if queryUtility(IKeyManager) is None:\n LOGGER.info('CSRF protection disabled on initial site '\n 'creation')\n else:\n # we have a keymanager, check csrf protection manually now\n checkCSRF(self.request)\n site = addPloneSite(\n context, site_id,\n title=form.get('title', ''),\n profile_id=form.get('profile_id', _DEFAULT_PROFILE),\n extension_ids=form.get('extension_ids', ()),\n setup_content=form.get('setup_content', False),\n default_language=form.get('default_language', 'en'),\n portal_timezone=form.get('portal_timezone', 'UTC')\n )\n self.request.response.redirect(site.absolute_url())\n return ''\n\n return self.index()\n\n\nclass Upgrade(BrowserView):\n\n def upgrades(self):\n pm = getattr(self.context, 'portal_migration')\n return pm.listUpgrades()\n\n def versions(self):\n pm = getattr(self.context, 'portal_migration')\n result = {}\n result['instance'] = pm.getInstanceVersion()\n result['fs'] = pm.getFileSystemVersion()\n result['equal'] = result['instance'] == result['fs']\n instance_version = normalize_version(result['instance'])\n fs_version = normalize_version(result['fs'])\n result['instance_gt'] = instance_version > fs_version\n result['instance_lt'] = instance_version < fs_version\n result['corelist'] = pm.coreVersions()\n return result\n\n def __call__(self):\n form = self.request.form\n submitted = form.get('form.submitted', False)\n if submitted:\n # CSRF protect. 
DO NOT use auto CSRF protection for upgrading sites\n alsoProvides(self.request, IDisableCSRFProtection)\n\n pm = getattr(self.context, 'portal_migration')\n report = pm.upgrade(\n REQUEST=self.request,\n dry_run=form.get('dry_run', False),\n )\n return self.index(\n report=report,\n )\n\n return self.index()\n\n def can_migrate_to_volto(self):\n if not HAS_VOLTO:\n return False\n pm = getattr(self.context, 'portal_migration')\n if pm.getInstanceVersion() < \"6005\":\n return False\n try:\n from plone.volto.browser import migrate_to_volto\n except ImportError:\n return False\n installer = get_installer(self.context, self.request)\n return not installer.is_product_installed(\"plone.volto\")\n",
"path": "Products/CMFPlone/browser/admin.py"
}
] | diff --git a/Products/CMFPlone/browser/admin.py b/Products/CMFPlone/browser/admin.py
index dfa3df808c..972a6c4789 100644
--- a/Products/CMFPlone/browser/admin.py
+++ b/Products/CMFPlone/browser/admin.py
@@ -343,3 +343,16 @@ def __call__(self):
)
return self.index()
+
+ def can_migrate_to_volto(self):
+ if not HAS_VOLTO:
+ return False
+ pm = getattr(self.context, 'portal_migration')
+ if pm.getInstanceVersion() < "6005":
+ return False
+ try:
+ from plone.volto.browser import migrate_to_volto
+ except ImportError:
+ return False
+ installer = get_installer(self.context, self.request)
+ return not installer.is_product_installed("plone.volto")
diff --git a/Products/CMFPlone/browser/templates/plone-upgrade.pt b/Products/CMFPlone/browser/templates/plone-upgrade.pt
index c4a09be6dd..21b66bbe9e 100644
--- a/Products/CMFPlone/browser/templates/plone-upgrade.pt
+++ b/Products/CMFPlone/browser/templates/plone-upgrade.pt
@@ -70,6 +70,16 @@
</dd>
</dl>
+ <tal:volto tal:condition="python: versions['equal'] and view.can_migrate_to_volto()">
+ <p class="alert-success p-2" i18n:translate="">
+ You can prepare your site for Volto, the default frontend of Plone 6!
+ </p>
+ <a class="p-2" i18n:translate=""
+ tal:attributes="href string:${context/absolute_url}/@@migrate_to_volto">
+ Click here if you want to learn more.
+ </a>
+ </tal:volto>
+
</div>
<form tal:condition="versions/instance_lt"
action="#"
diff --git a/news/3528.feature b/news/3528.feature
new file mode 100644
index 0000000000..6b0488bd07
--- /dev/null
+++ b/news/3528.feature
@@ -0,0 +1,2 @@
+Show link to the Volto-migration (@@migrate_to_volto) in the view @@plone-upgrade when the option is available.
+[pbauer]
\ No newline at end of file
| Allow migrating to Volto after upgrading a site to Plone 6
When Plone is upgraded to 6, the upgrade view (@@plone-upgrade) should display a link to the Volto migration.
See https://github.com/plone/plone.volto/issues/55 for the migration itself.
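For reference, the guard the diff adds to the `Upgrade` view boils down to the check sketched below. This is a minimal standalone sketch, not the literal method: `HAS_VOLTO` and `get_installer` are the module-level flag and helper already imported in `admin.py`, and the `"6005"` version threshold is taken verbatim from the patch.

```python
def can_migrate_to_volto(context, request):
    """Sketch of the guard used by @@plone-upgrade to offer the Volto migration."""
    if not HAS_VOLTO:
        # plone.volto is not importable at all
        return False
    pm = getattr(context, 'portal_migration')
    if pm.getInstanceVersion() < "6005":
        # the site has not been upgraded to Plone 6 yet
        return False
    try:
        # the @@migrate_to_volto form only ships with recent plone.volto releases
        from plone.volto.browser import migrate_to_volto  # noqa: F401
    except ImportError:
        return False
    installer = get_installer(context, request)
    # only offer the link while plone.volto is not yet installed on this site
    return not installer.is_product_installed("plone.volto")
```

The `plone-upgrade.pt` template then renders the link only when `versions['equal'] and view.can_migrate_to_volto()` holds, as shown in the template hunk of the diff.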
|
Lightning-Universe__lightning-flash-597 | [
{
"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom functools import partial\nfrom types import FunctionType\nfrom typing import Any, Callable, Dict, List, Optional, Union\n\nfrom pytorch_lightning.utilities import rank_zero_info\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\n\n_REGISTERED_FUNCTION = Dict[str, Any]\n\n\nclass FlashRegistry:\n \"\"\"This class is used to register function or :class:`functools.partial` class to a registry.\"\"\"\n\n def __init__(self, name: str, verbose: bool = False) -> None:\n self.name = name\n self.functions: List[_REGISTERED_FUNCTION] = []\n self._verbose = verbose\n\n def __len__(self) -> int:\n return len(self.functions)\n\n def __contains__(self, key) -> bool:\n return any(key == e[\"name\"] for e in self.functions)\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}(name={self.name}, functions={self.functions})'\n\n def get(\n self,\n key: str,\n with_metadata: bool = False,\n strict: bool = True,\n **metadata,\n ) -> Union[Callable, _REGISTERED_FUNCTION, List[_REGISTERED_FUNCTION], List[Callable]]:\n \"\"\"\n This function is used to gather matches from the registry:\n\n Args:\n key: Name of the registered function.\n with_metadata: Whether to include the associated metadata in the return value.\n strict: Whether to return all matches or just one.\n metadata: Metadata used to filter against existing registry item's metadata.\n \"\"\"\n matches = [e for e in self.functions if key == e[\"name\"]]\n if not matches:\n raise KeyError(f\"Key: {key} is not in {repr(self)}\")\n\n if metadata:\n matches = [m for m in matches if metadata.items() <= m[\"metadata\"].items()]\n if not matches:\n raise KeyError(\"Found no matches that fit your metadata criteria. 
Try removing some metadata\")\n\n matches = [e if with_metadata else e[\"fn\"] for e in matches]\n return matches[0] if strict else matches\n\n def remove(self, key: str) -> None:\n self.functions = [f for f in self.functions if f[\"name\"] != key]\n\n def _register_function(\n self,\n fn: Callable,\n name: Optional[str] = None,\n override: bool = False,\n metadata: Optional[Dict[str, Any]] = None\n ):\n if not isinstance(fn, FunctionType) and not isinstance(fn, partial):\n raise MisconfigurationException(f\"You can only register a function, found: {fn}\")\n\n name = name or fn.__name__\n\n if self._verbose:\n rank_zero_info(f\"Registering: {fn.__name__} function with name: {name} and metadata: {metadata}\")\n\n item = {\"fn\": fn, \"name\": name, \"metadata\": metadata or {}}\n\n matching_index = self._find_matching_index(item)\n if override and matching_index is not None:\n self.functions[matching_index] = item\n else:\n if matching_index is not None:\n raise MisconfigurationException(\n f\"Function with name: {name} and metadata: {metadata} is already present within {self}.\"\n \" HINT: Use `override=True`.\"\n )\n self.functions.append(item)\n\n def _find_matching_index(self, item: _REGISTERED_FUNCTION) -> Optional[int]:\n for idx, fn in enumerate(self.functions):\n if all(fn[k] == item[k] for k in (\"fn\", \"name\", \"metadata\")):\n return idx\n\n def __call__(\n self,\n fn: Optional[Callable[..., Any]] = None,\n name: Optional[str] = None,\n override: bool = False,\n **metadata\n ) -> Callable:\n \"\"\"\n This function is used to register new functions to the registry along their metadata.\n\n Functions can be filtered using metadata using the ``get`` function.\n\n \"\"\"\n if fn is not None:\n self._register_function(fn=fn, name=name, override=override, metadata=metadata)\n return fn\n\n # raise the error ahead of time\n if not (name is None or isinstance(name, str)):\n raise TypeError(f'`name` must be a str, found {name}')\n\n def _register(cls):\n self._register_function(fn=cls, name=name, override=override, metadata=metadata)\n return cls\n\n return _register\n\n def available_keys(self) -> List[str]:\n return sorted(v[\"name\"] for v in self.functions)\n",
"path": "flash/core/registry.py"
}
] | [
{
"content": "# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nfrom functools import partial\nfrom types import FunctionType\nfrom typing import Any, Callable, Dict, List, Optional, Union\n\nfrom pytorch_lightning.utilities import rank_zero_info\nfrom pytorch_lightning.utilities.exceptions import MisconfigurationException\n\n_REGISTERED_FUNCTION = Dict[str, Any]\n\n\nclass FlashRegistry:\n \"\"\"This class is used to register function or :class:`functools.partial` class to a registry.\"\"\"\n\n def __init__(self, name: str, verbose: bool = False) -> None:\n self.name = name\n self.functions: List[_REGISTERED_FUNCTION] = []\n self._verbose = verbose\n\n def __len__(self) -> int:\n return len(self.functions)\n\n def __contains__(self, key) -> bool:\n return any(key == e[\"name\"] for e in self.functions)\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}(name={self.name}, functions={self.functions})'\n\n def get(\n self,\n key: str,\n with_metadata: bool = False,\n strict: bool = True,\n **metadata,\n ) -> Union[Callable, _REGISTERED_FUNCTION, List[_REGISTERED_FUNCTION], List[Callable]]:\n \"\"\"\n This function is used to gather matches from the registry:\n\n Args:\n key: Name of the registered function.\n with_metadata: Whether to include the associated metadata in the return value.\n strict: Whether to return all matches or just one.\n metadata: Metadata used to filter against existing registry item's metadata.\n \"\"\"\n matches = [e for e in self.functions if key == e[\"name\"]]\n if not matches:\n raise KeyError(f\"Key: {key} is not in {type(self).__name__}\")\n\n if metadata:\n matches = [m for m in matches if metadata.items() <= m[\"metadata\"].items()]\n if not matches:\n raise KeyError(\"Found no matches that fit your metadata criteria. 
Try removing some metadata\")\n\n matches = [e if with_metadata else e[\"fn\"] for e in matches]\n return matches[0] if strict else matches\n\n def remove(self, key: str) -> None:\n self.functions = [f for f in self.functions if f[\"name\"] != key]\n\n def _register_function(\n self,\n fn: Callable,\n name: Optional[str] = None,\n override: bool = False,\n metadata: Optional[Dict[str, Any]] = None\n ):\n if not isinstance(fn, FunctionType) and not isinstance(fn, partial):\n raise MisconfigurationException(f\"You can only register a function, found: {fn}\")\n\n name = name or fn.__name__\n\n if self._verbose:\n rank_zero_info(f\"Registering: {fn.__name__} function with name: {name} and metadata: {metadata}\")\n\n item = {\"fn\": fn, \"name\": name, \"metadata\": metadata or {}}\n\n matching_index = self._find_matching_index(item)\n if override and matching_index is not None:\n self.functions[matching_index] = item\n else:\n if matching_index is not None:\n raise MisconfigurationException(\n f\"Function with name: {name} and metadata: {metadata} is already present within {self}.\"\n \" HINT: Use `override=True`.\"\n )\n self.functions.append(item)\n\n def _find_matching_index(self, item: _REGISTERED_FUNCTION) -> Optional[int]:\n for idx, fn in enumerate(self.functions):\n if all(fn[k] == item[k] for k in (\"fn\", \"name\", \"metadata\")):\n return idx\n\n def __call__(\n self,\n fn: Optional[Callable[..., Any]] = None,\n name: Optional[str] = None,\n override: bool = False,\n **metadata\n ) -> Callable:\n \"\"\"\n This function is used to register new functions to the registry along their metadata.\n\n Functions can be filtered using metadata using the ``get`` function.\n\n \"\"\"\n if fn is not None:\n self._register_function(fn=fn, name=name, override=override, metadata=metadata)\n return fn\n\n # raise the error ahead of time\n if not (name is None or isinstance(name, str)):\n raise TypeError(f'`name` must be a str, found {name}')\n\n def _register(cls):\n self._register_function(fn=cls, name=name, override=override, metadata=metadata)\n return cls\n\n return _register\n\n def available_keys(self) -> List[str]:\n return sorted(v[\"name\"] for v in self.functions)\n",
"path": "flash/core/registry.py"
}
] | diff --git a/flash/core/registry.py b/flash/core/registry.py
index ff3c99c336..61794424ce 100644
--- a/flash/core/registry.py
+++ b/flash/core/registry.py
@@ -56,7 +56,7 @@ def get(
"""
matches = [e for e in self.functions if key == e["name"]]
if not matches:
- raise KeyError(f"Key: {key} is not in {repr(self)}")
+ raise KeyError(f"Key: {key} is not in {type(self).__name__}")
if metadata:
matches = [m for m in matches if metadata.items() <= m["metadata"].items()]
diff --git a/tests/core/test_registry.py b/tests/core/test_registry.py
index 061c6f4504..3af891aa3a 100644
--- a/tests/core/test_registry.py
+++ b/tests/core/test_registry.py
@@ -28,19 +28,19 @@ def my_model(nc_input=5, nc_output=6):
return nn.Linear(nc_input, nc_output), nc_input, nc_output
with pytest.raises(MisconfigurationException, match="You can only register a function, found: Linear"):
- backbones(nn.Linear(1, 1), name="cho")
+ backbones(nn.Linear(1, 1), name="foo")
- backbones(my_model, name="cho", override=True)
+ backbones(my_model, name="foo", override=True)
- with pytest.raises(MisconfigurationException, match="Function with name: cho and metadata: {}"):
- backbones(my_model, name="cho", override=False)
+ with pytest.raises(MisconfigurationException, match="Function with name: foo and metadata: {}"):
+ backbones(my_model, name="foo", override=False)
with pytest.raises(KeyError, match="Found no matches"):
- backbones.get("cho", foo="bar")
+ backbones.get("foo", baz="bar")
- backbones.remove("cho")
- with pytest.raises(KeyError, match="Key: cho is not in FlashRegistry"):
- backbones.get("cho")
+ backbones.remove("foo")
+ with pytest.raises(KeyError, match="Key: foo is not in FlashRegistry"):
+ backbones.get("foo")
with pytest.raises(TypeError, match="name` must be a str"):
backbones(name=float) # noqa
@@ -59,30 +59,30 @@ def my_model(nc_input=5, nc_output=6):
assert mlp.weight.shape == (7, 5)
# basic get
- backbones(my_model, name="cho")
- assert backbones.get("cho")
+ backbones(my_model, name="foo")
+ assert backbones.get("foo")
# test override
- backbones(my_model, name="cho", override=True)
- functions = backbones.get("cho", strict=False)
+ backbones(my_model, name="foo", override=True)
+ functions = backbones.get("foo", strict=False)
assert len(functions) == 1
# test metadata filtering
- backbones(my_model, name="cho", namespace="timm", type="resnet")
- backbones(my_model, name="cho", namespace="torchvision", type="resnet")
- backbones(my_model, name="cho", namespace="timm", type="densenet")
- backbones(my_model, name="cho", namespace="timm", type="alexnet")
- function = backbones.get("cho", with_metadata=True, type="resnet", namespace="timm")
- assert function["name"] == "cho"
+ backbones(my_model, name="foo", namespace="timm", type="resnet")
+ backbones(my_model, name="foo", namespace="torchvision", type="resnet")
+ backbones(my_model, name="foo", namespace="timm", type="densenet")
+ backbones(my_model, name="foo", namespace="timm", type="alexnet")
+ function = backbones.get("foo", with_metadata=True, type="resnet", namespace="timm")
+ assert function["name"] == "foo"
assert function["metadata"] == {"namespace": "timm", "type": "resnet"}
# test strict=False and with_metadata=False
- functions = backbones.get("cho", namespace="timm", strict=False)
+ functions = backbones.get("foo", namespace="timm", strict=False)
assert len(functions) == 3
assert all(callable(f) for f in functions)
# test available keys
- assert backbones.available_keys() == ['cho', 'cho', 'cho', 'cho', 'cho', 'my_model']
+ assert backbones.available_keys() == ['foo', 'foo', 'foo', 'foo', 'foo', 'my_model']
# todo (tchaton) Debug this test.
| Confusing KeyError message for flash registry
## 🐛 Bug
### To Reproduce
Steps to reproduce the behavior:
```
from flash.image import ImageClassificationData, ImageClassifier
print(ImageClassifier.backbones.get('abcd'))
```
### Expected behavior
It should raise a `KeyError` with a concise message, not one that embeds the full registry `repr` listing every registered function.
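For what it's worth, the patch keeps the `KeyError` but swaps the `repr(self)` in its message for just the class name. Below is a minimal sketch of the changed lookup (strict lookup only, no metadata filtering), assuming the same `self.functions` list of `{"name": ..., "fn": ...}` dicts that `FlashRegistry` keeps:

```python
def get(self, key: str):
    """Sketch of FlashRegistry.get after the patch (strict lookup only)."""
    matches = [e for e in self.functions if key == e["name"]]
    if not matches:
        # before: f"Key: {key} is not in {repr(self)}"  -> dumps the whole registry
        # after:  f"Key: {key} is not in FlashRegistry" -> short and readable
        raise KeyError(f"Key: {key} is not in {type(self).__name__}")
    return matches[0]["fn"]
```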
### Additional context
Sending in PR.
|
benoitc__gunicorn-1071 | [
{
"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nfrom datetime import datetime\nimport os\nimport signal\nimport sys\nimport time\nimport traceback\nfrom random import randint\n\n\nfrom gunicorn import util\nfrom gunicorn.workers.workertmp import WorkerTmp\nfrom gunicorn.reloader import Reloader\nfrom gunicorn.http.errors import (\n InvalidHeader, InvalidHeaderName, InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, LimitRequestLine, LimitRequestHeaders,\n)\nfrom gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest\nfrom gunicorn.http.wsgi import default_environ, Response\nfrom gunicorn.six import MAXSIZE\n\n\nclass Worker(object):\n\n SIGNALS = [getattr(signal, \"SIG%s\" % x)\n for x in \"ABRT HUP QUIT INT TERM USR1 USR2 WINCH CHLD\".split()]\n\n PIPE = []\n\n def __init__(self, age, ppid, sockets, app, timeout, cfg, log):\n \"\"\"\\\n This is called pre-fork so it shouldn't do anything to the\n current process. If there's a need to make process wide\n changes you'll want to do that in ``self.init_process()``.\n \"\"\"\n self.age = age\n self.ppid = ppid\n self.sockets = sockets\n self.app = app\n self.timeout = timeout\n self.cfg = cfg\n self.booted = False\n self.aborted = False\n self.reloader = None\n\n self.nr = 0\n jitter = randint(0, cfg.max_requests_jitter)\n self.max_requests = cfg.max_requests + jitter or MAXSIZE\n self.alive = True\n self.log = log\n self.tmp = WorkerTmp(cfg)\n\n def __str__(self):\n return \"<Worker %s>\" % self.pid\n\n @property\n def pid(self):\n return os.getpid()\n\n def notify(self):\n \"\"\"\\\n Your worker subclass must arrange to have this method called\n once every ``self.timeout`` seconds. If you fail in accomplishing\n this task, the master process will murder your workers.\n \"\"\"\n self.tmp.notify()\n\n def run(self):\n \"\"\"\\\n This is the mainloop of a worker process. 
You should override\n this method in a subclass to provide the intended behaviour\n for your particular evil schemes.\n \"\"\"\n raise NotImplementedError()\n\n def init_process(self):\n \"\"\"\\\n If you override this method in a subclass, the last statement\n in the function should be to call this method with\n super(MyWorkerClass, self).init_process() so that the ``run()``\n loop is initiated.\n \"\"\"\n\n # start the reloader\n if self.cfg.reload:\n def changed(fname):\n self.log.info(\"Worker reloading: %s modified\", fname)\n os.kill(self.pid, signal.SIGQUIT)\n self.reloader = Reloader(callback=changed)\n self.reloader.start()\n\n # set environment' variables\n if self.cfg.env:\n for k, v in self.cfg.env.items():\n os.environ[k] = v\n\n util.set_owner_process(self.cfg.uid, self.cfg.gid)\n\n # Reseed the random number generator\n util.seed()\n\n # For waking ourselves up\n self.PIPE = os.pipe()\n for p in self.PIPE:\n util.set_non_blocking(p)\n util.close_on_exec(p)\n\n # Prevent fd inheritance\n [util.close_on_exec(s) for s in self.sockets]\n util.close_on_exec(self.tmp.fileno())\n\n self.log.close_on_exec()\n\n self.init_signals()\n\n self.cfg.post_worker_init(self)\n\n self.load_wsgi()\n\n # Enter main run loop\n self.booted = True\n self.run()\n\n def load_wsgi(self):\n try:\n self.wsgi = self.app.wsgi()\n except SyntaxError as e:\n if not self.cfg.reload:\n raise\n\n self.log.exception(e)\n\n exc_type, exc_val, exc_tb = sys.exc_info()\n self.reloader.add_extra_file(exc_val.filename)\n\n tb_string = traceback.format_exc(exc_tb)\n self.wsgi = util.make_fail_app(tb_string)\n\n def init_signals(self):\n # reset signaling\n [signal.signal(s, signal.SIG_DFL) for s in self.SIGNALS]\n # init new signaling\n signal.signal(signal.SIGQUIT, self.handle_quit)\n signal.signal(signal.SIGTERM, self.handle_exit)\n signal.signal(signal.SIGINT, self.handle_quit)\n signal.signal(signal.SIGWINCH, self.handle_winch)\n signal.signal(signal.SIGUSR1, self.handle_usr1)\n signal.signal(signal.SIGABRT, self.handle_abort)\n\n # Don't let SIGTERM and SIGUSR1 disturb active requests\n # by interrupting system calls\n if hasattr(signal, 'siginterrupt'): # python >= 2.6\n signal.siginterrupt(signal.SIGTERM, False)\n signal.siginterrupt(signal.SIGUSR1, False)\n\n def handle_usr1(self, sig, frame):\n self.log.reopen_files()\n\n def handle_exit(self, sig, frame):\n self.alive = False\n\n def handle_quit(self, sig, frame):\n self.alive = False\n # worker_int callback\n self.cfg.worker_int(self)\n time.sleep(0.1)\n sys.exit(0)\n\n def handle_abort(self, sig, frame):\n self.alive = False\n self.cfg.worker_abort(self)\n sys.exit(1)\n\n def handle_error(self, req, client, addr, exc):\n request_start = datetime.now()\n addr = addr or ('', -1) # unix socket case\n if isinstance(exc, (InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, InvalidHeader, InvalidHeaderName,\n LimitRequestLine, LimitRequestHeaders,\n InvalidProxyLine, ForbiddenProxyRequest)):\n\n status_int = 400\n reason = \"Bad Request\"\n\n if isinstance(exc, InvalidRequestLine):\n mesg = \"Invalid Request Line '%s'\" % str(exc)\n elif isinstance(exc, InvalidRequestMethod):\n mesg = \"Invalid Method '%s'\" % str(exc)\n elif isinstance(exc, InvalidHTTPVersion):\n mesg = \"Invalid HTTP Version '%s'\" % str(exc)\n elif isinstance(exc, (InvalidHeaderName, InvalidHeader,)):\n mesg = \"%s\" % str(exc)\n if not req and hasattr(exc, \"req\"):\n req = exc.req # for access log\n elif isinstance(exc, LimitRequestLine):\n mesg = \"%s\" % str(exc)\n elif 
isinstance(exc, LimitRequestHeaders):\n mesg = \"Error parsing headers: '%s'\" % str(exc)\n elif isinstance(exc, InvalidProxyLine):\n mesg = \"'%s'\" % str(exc)\n elif isinstance(exc, ForbiddenProxyRequest):\n reason = \"Forbidden\"\n mesg = \"Request forbidden\"\n status_int = 403\n\n msg = \"Invalid request from ip={ip}: {error}\"\n self.log.debug(msg.format(ip=addr[0], error=str(exc)))\n else:\n self.log.exception(\"Error handling request\")\n\n status_int = 500\n reason = \"Internal Server Error\"\n mesg = \"\"\n\n if req is not None:\n request_time = datetime.now() - request_start\n environ = default_environ(req, client, self.cfg)\n environ['REMOTE_ADDR'] = addr[0]\n environ['REMOTE_PORT'] = str(addr[1])\n resp = Response(req, client, self.cfg)\n resp.status = \"%s %s\" % (status_int, reason)\n resp.response_length = len(mesg)\n self.log.access(resp, req, environ, request_time)\n\n try:\n util.write_error(client, status_int, reason, mesg)\n except:\n self.log.debug(\"Failed to send error message.\")\n\n def handle_winch(self, sig, fname):\n # Ignore SIGWINCH in worker. Fixes a crash on OpenBSD.\n return\n",
"path": "gunicorn/workers/base.py"
}
] | [
{
"content": "# -*- coding: utf-8 -\n#\n# This file is part of gunicorn released under the MIT license.\n# See the NOTICE for more information.\n\nfrom datetime import datetime\nimport os\nimport signal\nimport sys\nimport time\nimport traceback\nfrom random import randint\n\n\nfrom gunicorn import util\nfrom gunicorn.workers.workertmp import WorkerTmp\nfrom gunicorn.reloader import Reloader\nfrom gunicorn.http.errors import (\n InvalidHeader, InvalidHeaderName, InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, LimitRequestLine, LimitRequestHeaders,\n)\nfrom gunicorn.http.errors import InvalidProxyLine, ForbiddenProxyRequest\nfrom gunicorn.http.wsgi import default_environ, Response\nfrom gunicorn.six import MAXSIZE\n\n\nclass Worker(object):\n\n SIGNALS = [getattr(signal, \"SIG%s\" % x)\n for x in \"ABRT HUP QUIT INT TERM USR1 USR2 WINCH CHLD\".split()]\n\n PIPE = []\n\n def __init__(self, age, ppid, sockets, app, timeout, cfg, log):\n \"\"\"\\\n This is called pre-fork so it shouldn't do anything to the\n current process. If there's a need to make process wide\n changes you'll want to do that in ``self.init_process()``.\n \"\"\"\n self.age = age\n self.ppid = ppid\n self.sockets = sockets\n self.app = app\n self.timeout = timeout\n self.cfg = cfg\n self.booted = False\n self.aborted = False\n self.reloader = None\n\n self.nr = 0\n jitter = randint(0, cfg.max_requests_jitter)\n self.max_requests = cfg.max_requests + jitter or MAXSIZE\n self.alive = True\n self.log = log\n self.tmp = WorkerTmp(cfg)\n\n def __str__(self):\n return \"<Worker %s>\" % self.pid\n\n @property\n def pid(self):\n return os.getpid()\n\n def notify(self):\n \"\"\"\\\n Your worker subclass must arrange to have this method called\n once every ``self.timeout`` seconds. If you fail in accomplishing\n this task, the master process will murder your workers.\n \"\"\"\n self.tmp.notify()\n\n def run(self):\n \"\"\"\\\n This is the mainloop of a worker process. 
You should override\n this method in a subclass to provide the intended behaviour\n for your particular evil schemes.\n \"\"\"\n raise NotImplementedError()\n\n def init_process(self):\n \"\"\"\\\n If you override this method in a subclass, the last statement\n in the function should be to call this method with\n super(MyWorkerClass, self).init_process() so that the ``run()``\n loop is initiated.\n \"\"\"\n\n # start the reloader\n if self.cfg.reload:\n def changed(fname):\n self.log.info(\"Worker reloading: %s modified\", fname)\n os.kill(self.pid, signal.SIGQUIT)\n self.reloader = Reloader(callback=changed)\n self.reloader.start()\n\n # set environment' variables\n if self.cfg.env:\n for k, v in self.cfg.env.items():\n os.environ[k] = v\n\n util.set_owner_process(self.cfg.uid, self.cfg.gid)\n\n # Reseed the random number generator\n util.seed()\n\n # For waking ourselves up\n self.PIPE = os.pipe()\n for p in self.PIPE:\n util.set_non_blocking(p)\n util.close_on_exec(p)\n\n # Prevent fd inheritance\n [util.close_on_exec(s) for s in self.sockets]\n util.close_on_exec(self.tmp.fileno())\n\n self.log.close_on_exec()\n\n self.init_signals()\n\n self.cfg.post_worker_init(self)\n\n self.load_wsgi()\n\n # Enter main run loop\n self.booted = True\n self.run()\n\n def load_wsgi(self):\n try:\n self.wsgi = self.app.wsgi()\n except SyntaxError as e:\n if not self.cfg.reload:\n raise\n\n self.log.exception(e)\n\n exc_type, exc_val, exc_tb = sys.exc_info()\n self.reloader.add_extra_file(exc_val.filename)\n\n tb_string = traceback.format_exc(exc_tb)\n self.wsgi = util.make_fail_app(tb_string)\n\n def init_signals(self):\n # reset signaling\n [signal.signal(s, signal.SIG_DFL) for s in self.SIGNALS]\n # init new signaling\n signal.signal(signal.SIGQUIT, self.handle_quit)\n signal.signal(signal.SIGTERM, self.handle_exit)\n signal.signal(signal.SIGINT, self.handle_quit)\n signal.signal(signal.SIGWINCH, self.handle_winch)\n signal.signal(signal.SIGUSR1, self.handle_usr1)\n signal.signal(signal.SIGABRT, self.handle_abort)\n\n # Don't let SIGTERM and SIGUSR1 disturb active requests\n # by interrupting system calls\n if hasattr(signal, 'siginterrupt'): # python >= 2.6\n signal.siginterrupt(signal.SIGTERM, False)\n signal.siginterrupt(signal.SIGUSR1, False)\n\n def handle_usr1(self, sig, frame):\n self.log.reopen_files()\n\n def handle_exit(self, sig, frame):\n self.alive = False\n\n def handle_quit(self, sig, frame):\n self.alive = False\n # worker_int callback\n self.cfg.worker_int(self)\n time.sleep(0.1)\n sys.exit(0)\n\n def handle_abort(self, sig, frame):\n self.alive = False\n self.cfg.worker_abort(self)\n sys.exit(1)\n\n def handle_error(self, req, client, addr, exc):\n request_start = datetime.now()\n addr = addr or ('', -1) # unix socket case\n if isinstance(exc, (InvalidRequestLine, InvalidRequestMethod,\n InvalidHTTPVersion, InvalidHeader, InvalidHeaderName,\n LimitRequestLine, LimitRequestHeaders,\n InvalidProxyLine, ForbiddenProxyRequest)):\n\n status_int = 400\n reason = \"Bad Request\"\n\n if isinstance(exc, InvalidRequestLine):\n mesg = \"Invalid Request Line '%s'\" % str(exc)\n elif isinstance(exc, InvalidRequestMethod):\n mesg = \"Invalid Method '%s'\" % str(exc)\n elif isinstance(exc, InvalidHTTPVersion):\n mesg = \"Invalid HTTP Version '%s'\" % str(exc)\n elif isinstance(exc, (InvalidHeaderName, InvalidHeader,)):\n mesg = \"%s\" % str(exc)\n if not req and hasattr(exc, \"req\"):\n req = exc.req # for access log\n elif isinstance(exc, LimitRequestLine):\n mesg = \"%s\" % str(exc)\n elif 
isinstance(exc, LimitRequestHeaders):\n mesg = \"Error parsing headers: '%s'\" % str(exc)\n elif isinstance(exc, InvalidProxyLine):\n mesg = \"'%s'\" % str(exc)\n elif isinstance(exc, ForbiddenProxyRequest):\n reason = \"Forbidden\"\n mesg = \"Request forbidden\"\n status_int = 403\n\n msg = \"Invalid request from ip={ip}: {error}\"\n self.log.debug(msg.format(ip=addr[0], error=str(exc)))\n else:\n self.log.exception(\"Error handling request %s\", req.uri)\n\n status_int = 500\n reason = \"Internal Server Error\"\n mesg = \"\"\n\n if req is not None:\n request_time = datetime.now() - request_start\n environ = default_environ(req, client, self.cfg)\n environ['REMOTE_ADDR'] = addr[0]\n environ['REMOTE_PORT'] = str(addr[1])\n resp = Response(req, client, self.cfg)\n resp.status = \"%s %s\" % (status_int, reason)\n resp.response_length = len(mesg)\n self.log.access(resp, req, environ, request_time)\n\n try:\n util.write_error(client, status_int, reason, mesg)\n except:\n self.log.debug(\"Failed to send error message.\")\n\n def handle_winch(self, sig, fname):\n # Ignore SIGWINCH in worker. Fixes a crash on OpenBSD.\n return\n",
"path": "gunicorn/workers/base.py"
}
] | diff --git a/THANKS b/THANKS
index d32d1bbf8..6fb97720e 100644
--- a/THANKS
+++ b/THANKS
@@ -158,3 +158,4 @@ Kyle Mulka <[email protected]>
Marc Abramowitz <[email protected]>
Hebert J <[email protected]>
Kevin Littlejohn <[email protected]>
+Wolfgang Schnerring <[email protected]>
diff --git a/gunicorn/workers/base.py b/gunicorn/workers/base.py
index 0b8cc115c..9ba8e7568 100644
--- a/gunicorn/workers/base.py
+++ b/gunicorn/workers/base.py
@@ -210,7 +210,7 @@ def handle_error(self, req, client, addr, exc):
msg = "Invalid request from ip={ip}: {error}"
self.log.debug(msg.format(ip=addr[0], error=str(exc)))
else:
- self.log.exception("Error handling request")
+ self.log.exception("Error handling request %s", req.uri)
status_int = 500
reason = "Internal Server Error"
| Include request URL in error message
It would be really helpful if the logged error message were "Error handling request http://host/path/etc" instead of just "Error handling request".
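The patch achieves this with a single logging change in `Worker.handle_error`. A minimal sketch of that branch follows; `req.uri` is the request target parsed by gunicorn's HTTP parser, and the `None` guard is an extra precaution of this sketch (the actual one-line change logs `req.uri` unconditionally, even though `req` can be `None` when parsing fails very early).

```python
# Unexpected-exception branch of Worker.handle_error (sketch of the patch).
if req is not None:
    # include the request target in the traceback log line
    self.log.exception("Error handling request %s", req.uri)
else:
    # req may be None, e.g. when the request line could not be parsed at all
    self.log.exception("Error handling request")
status_int = 500
reason = "Internal Server Error"
mesg = ""
```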
|
wemake-services__wemake-python-styleguide-343 | [
{
"content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('..'))\n\n\n# -- Project information -----------------------------------------------------\n\ndef _get_project_meta():\n import tomlkit\n\n with open('../pyproject.toml') as pyproject:\n contents = pyproject.read()\n\n return tomlkit.parse(contents)['tool']['poetry']\n\n\npkg_meta = _get_project_meta()\nproject = pkg_meta['name']\ncopyright = '2018, wemake.services'\nauthor = 'wemake.services'\n\n# The short X.Y version\nversion = pkg_meta['version']\n# The full version, including alpha/beta/rc tags\nrelease = version\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.doctest',\n 'sphinx.ext.todo',\n 'sphinx.ext.coverage',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.autosummary',\n\n # Used to include .md files:\n 'm2r',\n\n # Used to write python docstrings in a readable way:\n 'sphinxcontrib.napoleon',\n\n # Used to insert typehints into the final docs:\n 'sphinx_autodoc_typehints',\n\n # Used to embed values from the source code into the docs:\n 'added_value',\n]\n\nautoclass_content = 'class'\nautodoc_member_order = 'bysource'\n\nautodoc_mock_imports = [\n 'attr',\n]\n\nautodoc_member_order = 'bysource'\nautodoc_default_flags = {\n 'members': '',\n 'undoc-members': 'code,error_template',\n 'exclude-members': '__dict__,__weakref__',\n}\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n\nsource_suffix = ['.rst', '.md']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\nadd_module_names = False\n\nautodoc_default_options = {\n 'show-inheritance': True,\n}\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'alabaster'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n 'sidebar_collapse': False,\n 'show_powered_by': False,\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\nhtml_sidebars = {\n '**': [\n 'about.html',\n 'navigation.html',\n 'moreinfo.html',\n 'github.html',\n 'searchbox.html',\n ]\n}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'wemake-python-styleguidedoc'\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n 'wemake-python-styleguide.tex',\n 'wemake-python-styleguide Documentation',\n 'wemake.services',\n 'manual',\n ),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (\n master_doc,\n 'wemake-python-styleguide',\n 'wemake-python-styleguide Documentation',\n [author],\n 1,\n )\n]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n 'wemake-python-styleguide',\n 'wemake-python-styleguide Documentation',\n author,\n 'wemake-python-styleguide',\n 'One line description of project.',\n 'Miscellaneous',\n ),\n]\n\n\n# -- Extension configuration -------------------------------------------------\n\nnapoleon_numpy_docstring = False\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n",
"path": "docs/conf.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n#\n# Configuration file for the Sphinx documentation builder.\n#\n# This file does only contain a selection of the most common options. For a\n# full list see the documentation:\n# http://www.sphinx-doc.org/en/master/config\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n\nimport os\nimport sys\nsys.path.insert(0, os.path.abspath('..'))\n\n\n# -- Project information -----------------------------------------------------\n\ndef _get_project_meta():\n import tomlkit\n\n with open('../pyproject.toml') as pyproject:\n contents = pyproject.read()\n\n return tomlkit.parse(contents)['tool']['poetry']\n\n\npkg_meta = _get_project_meta()\nproject = pkg_meta['name']\ncopyright = '2018, wemake.services'\nauthor = 'wemake.services'\n\n# The short X.Y version\nversion = pkg_meta['version']\n# The full version, including alpha/beta/rc tags\nrelease = version\n\n\n# -- General configuration ---------------------------------------------------\n\n# If your documentation needs a minimal Sphinx version, state it here.\n#\n# needs_sphinx = '1.0'\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n 'sphinx.ext.autodoc',\n 'sphinx.ext.doctest',\n 'sphinx.ext.todo',\n 'sphinx.ext.coverage',\n 'sphinx.ext.viewcode',\n 'sphinx.ext.autosummary',\n 'sphinx.ext.napoleon',\n\n # Used to include .md files:\n 'm2r',\n\n # Used to insert typehints into the final docs:\n 'sphinx_autodoc_typehints',\n\n # Used to embed values from the source code into the docs:\n 'added_value',\n]\n\nautoclass_content = 'class'\nautodoc_member_order = 'bysource'\n\nautodoc_mock_imports = [\n 'attr',\n]\n\nautodoc_member_order = 'bysource'\nautodoc_default_flags = {\n 'members': '',\n 'undoc-members': 'code,error_template',\n 'exclude-members': '__dict__,__weakref__',\n}\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = ['_templates']\n\n# The suffix(es) of source filenames.\n# You can specify multiple suffix as a list of string:\n\nsource_suffix = ['.rst', '.md']\n\n# The master toctree document.\nmaster_doc = 'index'\n\n# The language for content autogenerated by Sphinx. Refer to documentation\n# for a list of supported languages.\n#\n# This is also used if you do content translation via gettext catalogs.\n# Usually you set \"language\" from the command line for these cases.\nlanguage = None\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path .\nexclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']\n\n# The name of the Pygments (syntax highlighting) style to use.\npygments_style = 'sphinx'\n\nadd_module_names = False\n\nautodoc_default_options = {\n 'show-inheritance': True,\n}\n\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = 'alabaster'\n\n# Theme options are theme-specific and customize the look and feel of a theme\n# further. 
For a list of options available for each theme, see the\n# documentation.\nhtml_theme_options = {\n 'sidebar_collapse': False,\n 'show_powered_by': False,\n}\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = ['_static']\n\n# Custom sidebar templates, must be a dictionary that maps document names\n# to template names.\n#\n# This is required for the alabaster theme\n# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars\nhtml_sidebars = {\n '**': [\n 'about.html',\n 'navigation.html',\n 'moreinfo.html',\n 'github.html',\n 'searchbox.html',\n ]\n}\n\n\n# -- Options for HTMLHelp output ---------------------------------------------\n\n# Output file base name for HTML help builder.\nhtmlhelp_basename = 'wemake-python-styleguidedoc'\n\n\n# -- Options for LaTeX output ------------------------------------------------\n\nlatex_elements = {\n # The paper size ('letterpaper' or 'a4paper').\n #\n # 'papersize': 'letterpaper',\n\n # The font size ('10pt', '11pt' or '12pt').\n #\n # 'pointsize': '10pt',\n\n # Additional stuff for the LaTeX preamble.\n #\n # 'preamble': '',\n\n # Latex figure (float) alignment\n #\n # 'figure_align': 'htbp',\n}\n\n# Grouping the document tree into LaTeX files. List of tuples\n# (source start file, target name, title,\n# author, documentclass [howto, manual, or own class]).\nlatex_documents = [\n (\n master_doc,\n 'wemake-python-styleguide.tex',\n 'wemake-python-styleguide Documentation',\n 'wemake.services',\n 'manual',\n ),\n]\n\n\n# -- Options for manual page output ------------------------------------------\n\n# One entry per manual page. List of tuples\n# (source start file, name, description, authors, manual section).\nman_pages = [\n (\n master_doc,\n 'wemake-python-styleguide',\n 'wemake-python-styleguide Documentation',\n [author],\n 1,\n )\n]\n\n\n# -- Options for Texinfo output ----------------------------------------------\n\n# Grouping the document tree into Texinfo files. List of tuples\n# (source start file, target name, title, author,\n# dir menu entry, description, category)\ntexinfo_documents = [\n (\n master_doc,\n 'wemake-python-styleguide',\n 'wemake-python-styleguide Documentation',\n author,\n 'wemake-python-styleguide',\n 'One line description of project.',\n 'Miscellaneous',\n ),\n]\n\n\n# -- Extension configuration -------------------------------------------------\n\nnapoleon_numpy_docstring = False\n\n# -- Options for todo extension ----------------------------------------------\n\n# If true, `todo` and `todoList` produce output, else they produce nothing.\ntodo_include_todos = True\n",
"path": "docs/conf.py"
}
] | diff --git a/docs/conf.py b/docs/conf.py
index 1d09a1952..86ca65700 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -55,13 +55,11 @@ def _get_project_meta():
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx.ext.autosummary',
+ 'sphinx.ext.napoleon',
# Used to include .md files:
'm2r',
- # Used to write python docstrings in a readable way:
- 'sphinxcontrib.napoleon',
-
# Used to insert typehints into the final docs:
'sphinx_autodoc_typehints',
diff --git a/docs/requirements.txt b/docs/requirements.txt
index 8bad7533d..94fdaba2f 100644
--- a/docs/requirements.txt
+++ b/docs/requirements.txt
@@ -2,7 +2,6 @@
# to generate documentation.
sphinx==1.8.2
-sphinxcontrib-napoleon==0.7
sphinx_autodoc_typehints==1.5.0
recommonmark==0.4.0
m2r==0.2.1
diff --git a/poetry.lock b/poetry.lock
index dbcdbd74c..d90eb4535 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -89,7 +89,7 @@ description = "Code coverage measurement for Python"
name = "coverage"
optional = false
python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*, <4"
-version = "4.5.1"
+version = "4.5.2"
[[package]]
category = "dev"
@@ -425,11 +425,11 @@ mistune = "*"
[[package]]
category = "dev"
-description = "Implements a XML/HTML/XHTML Markup safe string for Python"
+description = "Safely add untrusted strings to HTML/XML markup."
name = "markupsafe"
optional = false
-python-versions = "*"
-version = "1.0"
+python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*"
+version = "1.1.0"
[[package]]
category = "main"
@@ -496,7 +496,7 @@ description = "Python Build Reasonableness"
name = "pbr"
optional = false
python-versions = "*"
-version = "5.1.0"
+version = "5.1.1"
[[package]]
category = "main"
@@ -517,17 +517,6 @@ optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
version = "0.8.0"
-[[package]]
-category = "dev"
-description = "A collection of helpful Python tools!"
-name = "pockets"
-optional = false
-python-versions = "*"
-version = "0.7.2"
-
-[package.dependencies]
-six = ">=1.5.2"
-
[[package]]
category = "dev"
description = "library with cross-python path, ini-parsing, io, code, log facilities"
@@ -578,7 +567,7 @@ description = "Python parsing module"
name = "pyparsing"
optional = false
python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*"
-version = "2.2.2"
+version = "2.3.0"
[[package]]
category = "dev"
@@ -586,7 +575,7 @@ description = "pytest: simple powerful testing with Python"
name = "pytest"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-version = "3.9.3"
+version = "3.10.1"
[package.dependencies]
atomicwrites = ">=1.0"
@@ -667,7 +656,7 @@ description = "Python HTTP for Humans."
name = "requests"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-version = "2.20.0"
+version = "2.20.1"
[package.dependencies]
certifi = ">=2017.4.17"
@@ -681,7 +670,7 @@ description = "reStructuredText linter"
name = "restructuredtext-lint"
optional = false
python-versions = "*"
-version = "1.1.3"
+version = "1.2.1"
[package.dependencies]
docutils = ">=0.11,<1.0"
@@ -716,7 +705,7 @@ description = "Python documentation generator"
name = "sphinx"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
-version = "1.8.1"
+version = "1.8.2"
[package.dependencies]
Jinja2 = ">=2.3"
@@ -739,10 +728,10 @@ description = "Type hints (PEP 484) support for the Sphinx autodoc extension"
name = "sphinx-autodoc-typehints"
optional = false
python-versions = "!=3.5.0, !=3.5.1"
-version = "1.3.0"
+version = "1.5.0"
[package.dependencies]
-Sphinx = ">=1.4"
+Sphinx = ">=1.7"
[[package]]
category = "dev"
@@ -752,18 +741,6 @@ optional = false
python-versions = "*"
version = "1.3.0"
-[[package]]
-category = "dev"
-description = "Sphinx \"napoleon\" extension."
-name = "sphinxcontrib-napoleon"
-optional = false
-python-versions = "*"
-version = "0.7"
-
-[package.dependencies]
-pockets = ">=0.3"
-six = ">=1.5.2"
-
[[package]]
category = "dev"
description = "Sphinx API for Web Apps"
@@ -833,10 +810,10 @@ description = "HTTP library with thread-safe connection pooling, file post, and
name = "urllib3"
optional = false
python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, <4"
-version = "1.24"
+version = "1.24.1"
[metadata]
-content-hash = "23ccaf9e7348896e1939b5a9eee4fa10c4ff470b758b734d6ba70b6b9ed6f9b8"
+content-hash = "b13f9cfc68d0dba8a159017040dc03523e36a19c6c93ae64b4f797aca61ffcab"
python-versions = "^3.6 || ^3.7"
[metadata.hashes]
@@ -849,7 +826,7 @@ bandit = ["6102b5d6afd9d966df5054e0bdfc2e73a24d0fea400ec25f2e54c134412158d7", "9
certifi = ["339dc09518b07e2fa7eda5450740925974815557727d6bd35d319c1524a04a4c", "6d58c986d22b038c8c0df30d639f23a3e6d172a05c3583e766f4c0b785c0986a"]
chardet = ["84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae", "fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691"]
colorama = ["a3d89af5db9e9806a779a50296b5fdb466e281147c2c235e8225ecc6dbf7bbf3", "c9b54bebe91a6a803e0772c8561d53f2926bfeb17cd141fbabcb08424086595c"]
-coverage = ["03481e81d558d30d230bc12999e3edffe392d244349a90f4ef9b88425fac74ba", "0b136648de27201056c1869a6c0d4e23f464750fd9a9ba9750b8336a244429ed", "104ab3934abaf5be871a583541e8829d6c19ce7bde2923b2751e0d3ca44db60a", "10a46017fef60e16694a30627319f38a2b9b52e90182dddb6e37dcdab0f4bf95", "15b111b6a0f46ee1a485414a52a7ad1d703bdf984e9ed3c288a4414d3871dcbd", "198626739a79b09fa0a2f06e083ffd12eb55449b5f8bfdbeed1df4910b2ca640", "1c383d2ef13ade2acc636556fd544dba6e14fa30755f26812f54300e401f98f2", "23d341cdd4a0371820eb2b0bd6b88f5003a7438bbedb33688cd33b8eae59affd", "28b2191e7283f4f3568962e373b47ef7f0392993bb6660d079c62bd50fe9d162", "2a5b73210bad5279ddb558d9a2bfedc7f4bf6ad7f3c988641d83c40293deaec1", "2eb564bbf7816a9d68dd3369a510be3327f1c618d2357fa6b1216994c2e3d508", "337ded681dd2ef9ca04ef5d93cfc87e52e09db2594c296b4a0a3662cb1b41249", "3a2184c6d797a125dca8367878d3b9a178b6fdd05fdc2d35d758c3006a1cd694", "3c79a6f7b95751cdebcd9037e4d06f8d5a9b60e4ed0cd231342aa8ad7124882a", "3d72c20bd105022d29b14a7d628462ebdc61de2f303322c0212a054352f3b287", "3eb42bf89a6be7deb64116dd1cc4b08171734d721e7a7e57ad64cc4ef29ed2f1", "4635a184d0bbe537aa185a34193898eee409332a8ccb27eea36f262566585000", "56e448f051a201c5ebbaa86a5efd0ca90d327204d8b059ab25ad0f35fbfd79f1", "5a13ea7911ff5e1796b6d5e4fbbf6952381a611209b736d48e675c2756f3f74e", "69bf008a06b76619d3c3f3b1983f5145c75a305a0fea513aca094cae5c40a8f5", "6bc583dc18d5979dc0f6cec26a8603129de0304d5ae1f17e57a12834e7235062", "701cd6093d63e6b8ad7009d8a92425428bc4d6e7ab8d75efbb665c806c1d79ba", "7608a3dd5d73cb06c531b8925e0ef8d3de31fed2544a7de6c63960a1e73ea4bc", "76ecd006d1d8f739430ec50cc872889af1f9c1b6b8f48e29941814b09b0fd3cc", "7aa36d2b844a3e4a4b356708d79fd2c260281a7390d678a10b91ca595ddc9e99", "7d3f553904b0c5c016d1dad058a7554c7ac4c91a789fca496e7d8347ad040653", "7e1fe19bd6dce69d9fd159d8e4a80a8f52101380d5d3a4d374b6d3eae0e5de9c", "8c3cb8c35ec4d9506979b4cf90ee9918bc2e49f84189d9bf5c36c0c1119c6558", "9d6dd10d49e01571bf6e147d3b505141ffc093a06756c60b053a859cb2128b1f", "9e112fcbe0148a6fa4f0a02e8d58e94470fc6cb82a5481618fea901699bf34c4", "ac4fef68da01116a5c117eba4dd46f2e06847a497de5ed1d64bb99a5fda1ef91", "b8815995e050764c8610dbc82641807d196927c3dbed207f0a079833ffcf588d", "be6cfcd8053d13f5f5eeb284aa8a814220c3da1b0078fa859011c7fffd86dab9", "c1bb572fab8208c400adaf06a8133ac0712179a334c09224fb11393e920abcdd", "de4418dadaa1c01d497e539210cb6baa015965526ff5afc078c57ca69160108d", "e05cb4d9aad6233d67e0541caa7e511fa4047ed7750ec2510d466e806e0255d6", "e4d96c07229f58cb686120f168276e434660e4358cc9cf3b0464210b04913e77", "f3f501f345f24383c0000395b26b726e46758b71393267aeae0bd36f8b3ade80", "f8a923a85cb099422ad5a2e345fe877bbc89a8a8b23235824a93488150e45f6e"]
+coverage = ["06123b58a1410873e22134ca2d88bd36680479fe354955b3579fb8ff150e4d27", "09e47c529ff77bf042ecfe858fb55c3e3eb97aac2c87f0349ab5a7efd6b3939f", "0a1f9b0eb3aa15c990c328535655847b3420231af299386cfe5efc98f9c250fe", "0cc941b37b8c2ececfed341444a456912e740ecf515d560de58b9a76562d966d", "0d34245f824cc3140150ab7848d08b7e2ba67ada959d77619c986f2062e1f0e8", "10e8af18d1315de936d67775d3a814cc81d0747a1a0312d84e27ae5610e313b0", "1b4276550b86caa60606bd3572b52769860a81a70754a54acc8ba789ce74d607", "1e8a2627c48266c7b813975335cfdea58c706fe36f607c97d9392e61502dc79d", "258b21c5cafb0c3768861a6df3ab0cfb4d8b495eee5ec660e16f928bf7385390", "2b224052bfd801beb7478b03e8a66f3f25ea56ea488922e98903914ac9ac930b", "3ad59c84c502cd134b0088ca9038d100e8fb5081bbd5ccca4863f3804d81f61d", "447c450a093766744ab53bf1e7063ec82866f27bcb4f4c907da25ad293bba7e3", "46101fc20c6f6568561cdd15a54018bb42980954b79aa46da8ae6f008066a30e", "4710dc676bb4b779c4361b54eb308bc84d64a2fa3d78e5f7228921eccce5d815", "510986f9a280cd05189b42eee2b69fecdf5bf9651d4cd315ea21d24a964a3c36", "5535dda5739257effef56e49a1c51c71f1d37a6e5607bb25a5eee507c59580d1", "5a7524042014642b39b1fcae85fb37556c200e64ec90824ae9ecf7b667ccfc14", "5f55028169ef85e1fa8e4b8b1b91c0b3b0fa3297c4fb22990d46ff01d22c2d6c", "6694d5573e7790a0e8d3d177d7a416ca5f5c150742ee703f3c18df76260de794", "6831e1ac20ac52634da606b658b0b2712d26984999c9d93f0c6e59fe62ca741b", "71afc1f5cd72ab97330126b566bbf4e8661aab7449f08895d21a5d08c6b051ff", "7349c27128334f787ae63ab49d90bf6d47c7288c63a0a5dfaa319d4b4541dd2c", "77f0d9fa5e10d03aa4528436e33423bfa3718b86c646615f04616294c935f840", "828ad813c7cdc2e71dcf141912c685bfe4b548c0e6d9540db6418b807c345ddd", "859714036274a75e6e57c7bab0c47a4602d2a8cfaaa33bbdb68c8359b2ed4f5c", "85a06c61598b14b015d4df233d249cd5abfa61084ef5b9f64a48e997fd829a82", "869ef4a19f6e4c6987e18b315721b8b971f7048e6eaea29c066854242b4e98d9", "8cb4febad0f0b26c6f62e1628f2053954ad2c555d67660f28dfb1b0496711952", "977e2d9a646773cc7428cdd9a34b069d6ee254fadfb4d09b3f430e95472f3cf3", "99bd767c49c775b79fdcd2eabff405f1063d9d959039c0bdd720527a7738748a", "a5c58664b23b248b16b96253880b2868fb34358911400a7ba39d7f6399935389", "aaa0f296e503cda4bc07566f592cd7a28779d433f3a23c48082af425d6d5a78f", "ab235d9fe64833f12d1334d29b558aacedfbca2356dfb9691f2d0d38a8a7bfb4", "b3b0c8f660fae65eac74fbf003f3103769b90012ae7a460863010539bb7a80da", "bab8e6d510d2ea0f1d14f12642e3f35cefa47a9b2e4c7cea1852b52bc9c49647", "c45297bbdbc8bb79b02cf41417d63352b70bcb76f1bbb1ee7d47b3e89e42f95d", "d19bca47c8a01b92640c614a9147b081a1974f69168ecd494687c827109e8f42", "d64b4340a0c488a9e79b66ec9f9d77d02b99b772c8b8afd46c1294c1d39ca478", "da969da069a82bbb5300b59161d8d7c8d423bc4ccd3b410a9b4d8932aeefc14b", "ed02c7539705696ecb7dc9d476d861f3904a8d2b7e894bd418994920935d36bb", "ee5b8abc35b549012e03a7b1e86c09491457dba6c94112a2482b18589cc2bdb9"]
doc8 = ["2df89f9c1a5abfb98ab55d0175fed633cae0cf45025b8b1e0ee5ea772be28543", "d12f08aa77a4a65eb28752f4bc78f41f611f9412c4155e2b03f1f5d4a45efe04"]
docutils = ["02aec4bd92ab067f6ff27a38a38a41173bf01bed8f89157768c1573f53e474a6", "51e64ef2ebfb29cae1faa133b3710143496eca21c530f3f71424d77687764274", "7a4bd47eaf6596e1295ecb11361139febe29b084a87bf005bf899f9a42edc3c6"]
eradicate = ["f9af01c544ccd8f71bc2f7f3fa39dc363d842cfcb9c730a83676a59026ab5f24"]
@@ -880,39 +857,37 @@ imagesize = ["3f349de3eb99145973fefb7dbe38554414e5c30abd0c8e4b970a7c9d09f3a1d8",
isort = ["1153601da39a25b14ddc54955dbbacbb6b2d19135386699e2ad58517953b34af", "b9c40e9750f3d77e6e4d441d8b0266cf555e7cdabdcff33c4fd06366ca761ef8", "ec9ef8f4a9bc6f71eec99e1806bfa2de401650d996c59330782b89a5555c1497"]
jinja2 = ["74c935a1b8bb9a3947c50a54766a969d4846290e1e788ea44c1392163723c3bd", "f84be1bb0040caca4cea721fcbbbbd61f9be9464ca236387158b0feea01914a4"]
m2r = ["bf90bad66cda1164b17e5ba4a037806d2443f2a4d5ddc9f6a5554a0322aaed99"]
-markupsafe = ["a6be69091dac236ea9c6bc7d012beab42010fa914c459791d627dad4910eb665"]
+markupsafe = ["048ef924c1623740e70204aa7143ec592504045ae4429b59c30054cb31e3c432", "130f844e7f5bdd8e9f3f42e7102ef1d49b2e6fdf0d7526df3f87281a532d8c8b", "19f637c2ac5ae9da8bfd98cef74d64b7e1bb8a63038a3505cd182c3fac5eb4d9", "1b8a7a87ad1b92bd887568ce54b23565f3fd7018c4180136e1cf412b405a47af", "1c25694ca680b6919de53a4bb3bdd0602beafc63ff001fea2f2fc16ec3a11834", "1f19ef5d3908110e1e891deefb5586aae1b49a7440db952454b4e281b41620cd", "1fa6058938190ebe8290e5cae6c351e14e7bb44505c4a7624555ce57fbbeba0d", "31cbb1359e8c25f9f48e156e59e2eaad51cd5242c05ed18a8de6dbe85184e4b7", "3e835d8841ae7863f64e40e19477f7eb398674da6a47f09871673742531e6f4b", "4e97332c9ce444b0c2c38dd22ddc61c743eb208d916e4265a2a3b575bdccb1d3", "525396ee324ee2da82919f2ee9c9e73b012f23e7640131dd1b53a90206a0f09c", "52b07fbc32032c21ad4ab060fec137b76eb804c4b9a1c7c7dc562549306afad2", "52ccb45e77a1085ec5461cde794e1aa037df79f473cbc69b974e73940655c8d7", "5c3fbebd7de20ce93103cb3183b47671f2885307df4a17a0ad56a1dd51273d36", "5e5851969aea17660e55f6a3be00037a25b96a9b44d2083651812c99d53b14d1", "5edfa27b2d3eefa2210fb2f5d539fbed81722b49f083b2c6566455eb7422fd7e", "7d263e5770efddf465a9e31b78362d84d015cc894ca2c131901a4445eaa61ee1", "83381342bfc22b3c8c06f2dd93a505413888694302de25add756254beee8449c", "857eebb2c1dc60e4219ec8e98dfa19553dae33608237e107db9c6078b1167856", "98e439297f78fca3a6169fd330fbe88d78b3bb72f967ad9961bcac0d7fdd1550", "bf54103892a83c64db58125b3f2a43df6d2cb2d28889f14c78519394feb41492", "d9ac82be533394d341b41d78aca7ed0e0f4ba5a2231602e2f05aa87f25c51672", "e982fe07ede9fada6ff6705af70514a52beb1b2c3d25d4e873e82114cf3c5401", "edce2ea7f3dfc981c4ddc97add8a61381d9642dc3273737e756517cc03e84dd6", "efdc45ef1afc238db84cb4963aa689c0408912a0239b0721cb172b4016eb31d6", "f137c02498f8b935892d5c0172560d7ab54bc45039de8805075e19079c639a9c", "f82e347a72f955b7017a39708a3667f106e6ad4d10b25f237396a7115d8ed5fd", "fb7c206e01ad85ce57feeaaa0bf784b97fa3cad0d4a5737bc5295785f5c613a1"]
mccabe = ["ab8a6258860da4b6677da4bd2fe5dc2c659cff31b3ee4f7f5d64e79735b80d42", "dd8d182285a0fe56bace7f45b5e7d1a6ebcbf524e8f3bd87eb0f125271b8831f"]
mistune = ["59a3429db53c50b5c6bcc8a07f8848cb00d7dc8bdb431a4ab41920d201d4756e", "88a1051873018da288eee8538d476dffe1262495144b33ecb586c4ab266bb8d4"]
more-itertools = ["c187a73da93e7a8acc0001572aebc7e3c69daf7bf6881a2cea10650bd4420092", "c476b5d3a34e12d40130bc2f935028b5f636df8f372dc2c1c01dc19681b2039e", "fcbfeaea0be121980e15bc97b3817b5202ca73d0eae185b4550cbfce2a3ebb3d"]
mypy = ["8e071ec32cc226e948a34bbb3d196eb0fd96f3ac69b6843a5aff9bd4efa14455", "fb90c804b84cfd8133d3ddfbd630252694d11ccc1eb0166a1b2efb5da37ecab2"]
mypy-extensions = ["37e0e956f41369209a3d5f34580150bcacfabaa57b33a15c0b25f4b5725e0812", "b16cabe759f55e3409a7d231ebd2841378fb0c27a5d1994719e340e4f429ac3e"]
packaging = ["0886227f54515e592aaa2e5a553332c73962917f2831f1b0f9b9f4380a4b9807", "f95a1e147590f204328170981833854229bb2912ac3d5f89e2a8ccd2834800c9"]
-pbr = ["8fc938b1123902f5610b06756a31b1e6febf0d105ae393695b0c9d4244ed2910", "f20ec0abbf132471b68963bb34d9c78e603a5cf9e24473f14358e66551d47475"]
+pbr = ["f59d71442f9ece3dffc17bc36575768e1ee9967756e6b6535f0ee1f0054c3d68", "f6d5b23f226a2ba58e14e49aa3b1bfaf814d0199144b95d78458212444de1387"]
pep8-naming = ["360308d2c5d2fff8031c1b284820fbdb27a63274c0c1a8ce884d631836da4bdd", "624258e0dd06ef32a9daf3c36cc925ff7314da7233209c5b01f7e5cdd3c34826"]
pluggy = ["447ba94990e8014ee25ec853339faf7b0fc8050cdc3289d4d71f7f410fb90095", "bde19360a8ec4dfd8a20dcb811780a30998101f078fc7ded6162f0076f50508f"]
-pockets = ["109eb91588e9cf722de98c98d300e1c5896e877f5704dc61176fa09686ca635b", "21a2405543c439ac091453ed187f558cf5294d3f85f15310f214ad4de057e0af"]
py = ["bf92637198836372b520efcba9e020c330123be8ce527e535d185ed4b6f45694", "e76826342cefe3c3d5f7e8ee4316b80d1dd8a300781612ddbc765c17ba25a6c6"]
pycodestyle = ["74abc4e221d393ea5ce1f129ea6903209940c1ecd29e002e8c6933c2b21026e0", "cbc619d09254895b0d12c2c691e237b2e91e9b2ecf5e84c26b35400f93dcfb83", "cbfca99bd594a10f674d0cd97a3d802a1fdef635d4361e1a2658de47ed261e3a"]
pydocstyle = ["2258f9b0df68b97bf3a6c29003edc5238ff8879f1efb6f1999988d934e432bd8", "5741c85e408f9e0ddf873611085e819b809fca90b619f5fd7f34bd4959da3dd4", "ed79d4ec5e92655eccc21eb0c6cf512e69512b4a97d215ace46d17e4990f2039"]
pyflakes = ["9a7662ec724d0120012f6e29d6248ae3727d821bba522a0e6b356eff19126a49", "f661252913bc1dbe7fcfcbf0af0db3f42ab65aabd1a6ca68fe5d466bace94dae"]
pygments = ["78f3f434bcc5d6ee09020f92ba487f95ba50f1e3ef83ae96b9d5ffa1bab25c5d", "dbae1046def0efb574852fab9e90209b23f556367b5a320c0bcb871c77c3e8cc"]
-pyparsing = ["bc6c7146b91af3f567cf6daeaec360bc07d45ffec4cf5353f4d7a208ce7ca30a", "d29593d8ebe7b57d6967b62494f8c72b03ac0262b1eed63826c6f788b3606401"]
-pytest = ["a9e5e8d7ab9d5b0747f37740276eb362e6a76275d76cebbb52c6049d93b475db", "bf47e8ed20d03764f963f0070ff1c8fda6e2671fc5dd562a4d3b7148ad60f5ca"]
+pyparsing = ["40856e74d4987de5d01761a22d1621ae1c7f8774585acae358aa5c5936c6c90b", "f353aab21fd474459d97b709e527b5571314ee5f067441dc9f88e33eecd96592"]
+pytest = ["3f193df1cfe1d1609d4c583838bea3d532b18d6160fd3f55c9447fdca30848ec", "e246cf173c01169b9617fc07264b7b1316e78d7a650055235d6d897bc80d9660"]
pytest-cov = ["513c425e931a0344944f84ea47f3956be0e416d95acbd897a44970c8d926d5d7", "e360f048b7dae3f2f2a9a4d067b2dd6b6a015d384d1577c994a43f3f7cbad762"]
pytest-flake8 = ["4f30f5be3efb89755f38f11bdb2a5e22d19a6f5faa73428f703a3292a9572cd3", "c740ad6aa19e3958947d2118f70bed218caf1d2097039fb7318573a2a72f89a1"]
pytest-isort = ["2221c0914dfca41914625a646f0d2d1d4c676861b9a7b26746a7fdd40aa2c59b", "c70d0f900f4647bb714f0843dd82d7f7b759904006de31254efdb72ce88e0c0e"]
pytest-randomly = ["6db5e03d72b54052b9b379dc3cfa4749c19bfe4de161cf3eb24762049f4ce9be", "92ec6745d3ebdd690ecb598648748c9601f16f5afacf83ccef2b50d23e6edb7f"]
pytz = ["31cb35c89bd7d333cd32c5f278fca91b523b0834369e757f4c5641ea252236ca", "8e0f8568c118d3077b46be7d654cc8167fa916092e28320cde048e54bfc9f1e6"]
pyyaml = ["3d7da3009c0f3e783b2c873687652d83b1bbfd5c88e9813fb7e5b03c0dd3108b", "3ef3092145e9b70e3ddd2c7ad59bdd0252a94dfe3949721633e41344de00a6bf", "40c71b8e076d0550b2e6380bada1f1cd1017b882f7e16f09a65be98e017f211a", "558dd60b890ba8fd982e05941927a3911dc409a63dcb8b634feaa0cda69330d3", "a7c28b45d9f99102fa092bb213aa12e0aaf9a6a1f5e395d36166639c1f96c3a1", "aa7dd4a6a427aed7df6fb7f08a580d68d9b118d90310374716ae90b710280af1", "bc558586e6045763782014934bfaf39d48b8ae85a2713117d16c39864085c613", "d46d7982b62e0729ad0175a9bc7e10a566fc07b224d2c79fafb5e032727eaa04", "d5eef459e30b09f5a098b9cea68bebfeb268697f78d647bd255a085371ac7f3f", "e01d3203230e1786cd91ccfdc8f8454c8069c91bee3962ad93b87a4b2860f537", "e170a9e6fcfd19021dd29845af83bb79236068bf5fd4df3327c1be18182b2531"]
-requests = ["99dcfdaaeb17caf6e526f32b6a7b780461512ab3f1d992187801694cba42770c", "a84b8c9ab6239b578f22d1c21d51b696dcfe004032bb80ea832398d6909d7279"]
-restructuredtext-lint = ["c48ca9a84c312b262809f041fe47dcfaedc9ee4879b3e1f9532f745c182b4037"]
+requests = ["65b3a120e4329e33c9889db89c80976c5272f56ea92d3e74da8a463992e3ff54", "ea881206e59f41dbd0bd445437d792e43906703fff75ca8ff43ccdb11f33f263"]
+restructuredtext-lint = ["8712f9066d2c748002ec24f6f7ddca13e0c37654ae4f1ba0dcf0e78ba453c387"]
six = ["70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9", "832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb"]
smmap2 = ["0555a7bf4df71d1ef4218e4807bbf9b201f910174e6e08af2e138d4e517b4dde", "29a9ffa0497e7f2be94ca0ed1ca1aa3cd4cf25a1f6b4f5f87f74b46ed91d609a"]
snowballstemmer = ["919f26a68b2c17a7634da993d91339e288964f93c274f1343e3bbbe2096e1128", "9f3bcd3c401c3e862ec0ebe6d2c069ebc012ce142cce209c098ccb5b09136e89"]
-sphinx = ["652eb8c566f18823a022bb4b6dbc868d366df332a11a0226b5bc3a798a479f17", "d222626d8356de702431e813a05c68a35967e3d66c6cd1c2c89539bb179a7464"]
-sphinx-autodoc-typehints = ["1a9df6cb3ba72453ea4bfbe96ea887abc0d796b2ce9508c2189217a1bb69b366", "46cc9e985ee6d8bbbd07fffd95b815c39a72df6afb600f59671f85f7340e7d0d"]
+sphinx = ["120732cbddb1b2364471c3d9f8bfd4b0c5b550862f99a65736c77f970b142aea", "b348790776490894e0424101af9c8413f2a86831524bd55c5f379d3e3e12ca64"]
+sphinx-autodoc-typehints = ["1a8625295a5084fceeddd0f8a9718ec1d2f937652ad027381a9d9e4943b14972", "a635e98faf2b9f1169eba19104bd8d378e45da6f8936f6f774d9cc0640d4caee"]
sphinx-readable-theme = ["f5fe65a2e112cb956b366df41e0fc894ff6b6f0e4a4814fcbff692566db47fc0"]
-sphinxcontrib-napoleon = ["407382beed396e9f2d7f3043fad6afda95719204a1e1a231ac865f40abcbfcf8", "711e41a3974bdf110a484aec4c1a556799eb0b3f3b897521a018ad7e2db13fef"]
sphinxcontrib-websupport = ["68ca7ff70785cbe1e7bccc71a48b5b6d965d79ca50629606c7861a21b206d9dd", "9de47f375baf1ea07cdb3436ff39d7a9c76042c10a769c52353ec46e4e8fc3b9"]
stevedore = ["b92bc7add1a53fb76c634a178978d113330aaf2006f9498d9e2414b31fbfc104", "c58b7c231a9c4890cd3c2b5d2b23bd63fa807ff934d68579e3f6c3a1735e8a7c"]
testfixtures = ["334497d26344e8c0c5d01b4d785a1c83464573151e6a5f7ab250eb7981d452ec", "53c06c1feb0bf378d63c54d1d96858978422d5a34793b39f0dcb0e44f8ec26f4"]
@@ -920,4 +895,4 @@ tomlkit = ["27ddd2796855428a0316057884ec081a1c967c8d29c3d489fcfccd1bb2976ede", "
typed-ast = ["0948004fa228ae071054f5208840a1e88747a357ec1101c17217bfe99b299d58", "10703d3cec8dcd9eef5a630a04056bbc898abc19bac5691612acba7d1325b66d", "1f6c4bd0bdc0f14246fd41262df7dfc018d65bb05f6e16390b7ea26ca454a291", "25d8feefe27eb0303b73545416b13d108c6067b846b543738a25ff304824ed9a", "29464a177d56e4e055b5f7b629935af7f49c196be47528cc94e0a7bf83fbc2b9", "2e214b72168ea0275efd6c884b114ab42e316de3ffa125b267e732ed2abda892", "3e0d5e48e3a23e9a4d1a9f698e32a542a4a288c871d33ed8df1b092a40f3a0f9", "519425deca5c2b2bdac49f77b2c5625781abbaf9a809d727d3a5596b30bb4ded", "57fe287f0cdd9ceaf69e7b71a2e94a24b5d268b35df251a88fef5cc241bf73aa", "668d0cec391d9aed1c6a388b0d5b97cd22e6073eaa5fbaa6d2946603b4871efe", "68ba70684990f59497680ff90d18e756a47bf4863c604098f10de9716b2c0bdd", "6de012d2b166fe7a4cdf505eee3aaa12192f7ba365beeefaca4ec10e31241a85", "79b91ebe5a28d349b6d0d323023350133e927b4de5b651a8aa2db69c761420c6", "8550177fa5d4c1f09b5e5f524411c44633c80ec69b24e0e98906dd761941ca46", "898f818399cafcdb93cbbe15fc83a33d05f18e29fb498ddc09b0214cdfc7cd51", "94b091dc0f19291adcb279a108f5d38de2430411068b219f41b343c03b28fb1f", "a26863198902cda15ab4503991e8cf1ca874219e0118cbf07c126bce7c4db129", "a8034021801bc0440f2e027c354b4eafd95891b573e12ff0418dec385c76785c", "bc978ac17468fe868ee589c795d06777f75496b1ed576d308002c8a5756fb9ea", "c05b41bc1deade9f90ddc5d988fe506208019ebba9f2578c622516fd201f5863", "c9b060bd1e5a26ab6e8267fd46fc9e02b54eb15fffb16d112d4c7b1c12987559", "edb04bdd45bfd76c8292c4d9654568efaedf76fe78eb246dde69bdb13b2dad87", "f19f2a4f547505fe9072e15f6f4ae714af51b5a681a97f187971f50c283193b6"]
typing = ["4027c5f6127a6267a435201981ba156de91ad0d1d98e9ddc2aa173453453492d", "57dcf675a99b74d64dacf6fba08fb17cf7e3d5fdff53d4a30ea2a5e7e52543d4", "a4c8473ce11a65999c8f59cb093e70686b6c84c98df58c1dae9b3b196089858a"]
typing-extensions = ["2a6c6e78e291a4b6cbd0bbfd30edc0baaa366de962129506ec8fe06bdec66457", "51e7b7f3dcabf9ad22eed61490f3b8d23d9922af400fe6656cb08e66656b701f", "55401f6ed58ade5638eb566615c150ba13624e2f0c1eedd080fc3c1b6cb76f1d"]
-urllib3 = ["41c3db2fc01e5b907288010dec72f9d0a74e37d6994e6eb56849f59fea2265ae", "8819bba37a02d143296a4d032373c4dd4aca11f6d4c9973335ca75f9c8475f59"]
+urllib3 = ["61bf29cada3fc2fbefad4fdf059ea4bd1b4a86d2b6d15e1c7c0b582b9752fe39", "de9529817c93f27c8ccbfead6985011db27bd0ddfcdb2d86f3f663385c6a9c22"]
diff --git a/pyproject.toml b/pyproject.toml
index b02204a72..ac2aafbcc 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -71,7 +71,6 @@ flake8-pytest = "^1.3"
mypy = "^0.641"
sphinx = "^1.8"
sphinx-autodoc-typehints = "^1.3"
-sphinxcontrib-napoleon = "^0.7"
doc8 = "^0.8"
m2r = "^0.2"
sphinx_readable_theme = "^1.3"
| Replace `sphinxcontrib-napoleon`
It is now bundled with `sphinx` as `sphinx.ext.napoleon`.
So, we need to remove this dependency from both:
- `pyproject.toml`
- `docs/requirements.txt`
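For reference, a minimal sketch of what the switch looks like on the Sphinx side (a hypothetical `conf.py` excerpt; this project's actual docs configuration is not shown here):

```python
# docs/conf.py -- hypothetical excerpt.
# The napoleon extension has shipped with Sphinx itself since 1.3, so the
# separate sphinxcontrib-napoleon package is no longer needed as a dependency.
extensions = [
    "sphinx.ext.autodoc",
    # "sphinxcontrib.napoleon",  # old: provided by the standalone package
    "sphinx.ext.napoleon",       # new: bundled with Sphinx
]
```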
|
LibraryOfCongress__concordia-1208 | [
{
"content": "from django.conf import settings\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.http import Http404, HttpResponseForbidden\nfrom django.urls import include, path\nfrom django.urls.converters import register_converter\nfrom django.views.defaults import page_not_found, permission_denied, server_error\nfrom django.views.generic import RedirectView\n\nfrom exporter import views as exporter_views\n\nfrom . import converters, views\n\nregister_converter(converters.UnicodeSlugConverter, \"uslug\")\nregister_converter(converters.ItemIdConverter, \"item_id\")\n\ntx_urlpatterns = (\n [\n path(\"\", views.CampaignListView.as_view(), name=\"campaign-list\"),\n path(\n \"<uslug:slug>/\", views.CampaignDetailView.as_view(), name=\"campaign-detail\"\n ),\n path(\n \"<uslug:campaign_slug>/export/csv/\",\n exporter_views.ExportCampaignToCSV.as_view(),\n name=\"campaign-export-csv\",\n ),\n path(\n \"<uslug:campaign_slug>/export/bagit/\",\n exporter_views.ExportCampaignToBagIt.as_view(),\n name=\"campaign-export-bagit\",\n ),\n path(\n \"<uslug:campaign_slug>/<uslug:project_slug>/export/bagit/\",\n exporter_views.ExportProjectToBagIt.as_view(),\n name=\"project-export-bagit\",\n ),\n path(\n (\n \"<uslug:campaign_slug>/<uslug:project_slug>/\"\n \"<item_id:item_id>/export/bagit/\"\n ),\n exporter_views.ExportItemToBagIt.as_view(),\n name=\"item-export-bagit\",\n ),\n path(\n \"<uslug:campaign_slug>/report/\",\n views.ReportCampaignView.as_view(),\n name=\"campaign-report\",\n ),\n path(\n (\n \"<uslug:campaign_slug>/<uslug:project_slug>/\"\n \"<item_id:item_id>/<uslug:slug>/\"\n ),\n views.AssetDetailView.as_view(),\n name=\"asset-detail\",\n ),\n # n.b. this must be above project-detail to avoid being seen as a project slug:\n path(\n \"<uslug:campaign_slug>/next-transcribable-asset/\",\n views.redirect_to_next_transcribable_campaign_asset,\n name=\"redirect-to-next-transcribable-campaign-asset\",\n ),\n path(\n \"<uslug:campaign_slug>/next-reviewable-asset/\",\n views.redirect_to_next_reviewable_campaign_asset,\n name=\"redirect-to-next-reviewable-campaign-asset\",\n ),\n path(\n \"<uslug:campaign_slug>/<uslug:slug>/\",\n views.ProjectDetailView.as_view(),\n name=\"project-detail\",\n ),\n path(\n \"<uslug:campaign_slug>/<uslug:project_slug>/<item_id:item_id>/\",\n views.ItemDetailView.as_view(),\n name=\"item-detail\",\n ),\n ],\n \"transcriptions\",\n)\n\nurlpatterns = [\n path(\"\", views.HomeView.as_view(), name=\"homepage\"),\n path(\"healthz\", views.healthz, name=\"health-check\"),\n path(\"about/\", views.simple_page, name=\"about\"),\n path(\"help-center/\", views.simple_page, name=\"help-center\"),\n path(\"help-center/welcome-guide/\", views.simple_page, name=\"welcome-guide\"),\n path(\"help-center/how-to-transcribe/\", views.simple_page, name=\"how-to-transcribe\"),\n path(\"help-center/how-to-review/\", views.simple_page, name=\"how-to-review\"),\n path(\"help-center/how-to-tag/\", views.simple_page, name=\"how-to-tag\"),\n path(\n \"help-center/welcome-guide-esp/\",\n views.simple_page,\n name=\"welcome-guide-spanish\",\n ),\n path(\n \"help-center/how-to-transcribe-esp/\",\n views.simple_page,\n name=\"how-to-transcribe-spanish\",\n ),\n path(\n \"help-center/how-to-review-esp/\",\n views.simple_page,\n name=\"how-to-review-spanish\",\n ),\n path(\"help-center/how-to-tag-esp/\", views.simple_page, name=\"how-to-tag-spanish\"),\n path(\"for-educators/\", views.simple_page, name=\"for-educators\"),\n path(\"resources/\", views.simple_page, 
name=\"resources\"),\n path(\n \"latest/\",\n RedirectView.as_view(pattern_name=\"about\", permanent=True, query_string=True),\n ),\n path(\"questions/\", views.simple_page, name=\"questions\"),\n path(\"contact/\", views.ContactUsView.as_view(), name=\"contact\"),\n path(\"act/\", views.action_app, name=\"action-app\"),\n path(\n \"campaigns-topics/\",\n views.CampaignTopicListView.as_view(),\n name=\"campaign-topic-list\",\n ),\n path(\"topics/\", views.TopicListView.as_view(), name=\"topic-list\"),\n path(\"topics/<uslug:slug>/\", views.TopicDetailView.as_view(), name=\"topic-detail\"),\n path(\n \"topics/<uslug:topic_slug>/next-transcribable-asset/\",\n views.redirect_to_next_transcribable_topic_asset,\n name=\"redirect-to-next-transcribable-topic-asset\",\n ),\n path(\n \"topics/<uslug:topic_slug>/next-reviewable-asset/\",\n views.redirect_to_next_reviewable_topic_asset,\n name=\"redirect-to-next-reviewable-topic-asset\",\n ),\n path(\n \"next-transcribable-asset/\",\n views.redirect_to_next_transcribable_asset,\n name=\"redirect-to-next-transcribable-asset\",\n ),\n path(\n \"next-reviewable-asset/\",\n views.redirect_to_next_reviewable_asset,\n name=\"redirect-to-next-reviewable-asset\",\n ),\n path(\"campaigns/\", include(tx_urlpatterns, namespace=\"transcriptions\")),\n path(\"reserve-asset/<int:asset_pk>/\", views.reserve_asset, name=\"reserve-asset\"),\n path(\n \"assets/<int:asset_pk>/transcriptions/save/\",\n views.save_transcription,\n name=\"save-transcription\",\n ),\n path(\n \"transcriptions/<int:pk>/submit/\",\n views.submit_transcription,\n name=\"submit-transcription\",\n ),\n path(\n \"transcriptions/<int:pk>/review/\",\n views.review_transcription,\n name=\"review-transcription\",\n ),\n path(\"assets/<int:asset_pk>/tags/submit/\", views.submit_tags, name=\"submit-tags\"),\n path(\"assets/\", views.AssetListView.as_view(), name=\"asset-list\"),\n path(\n \"transcribe/\", views.TranscribeListView.as_view(), name=\"transcribe-asset-list\"\n ),\n path(\"review/\", views.ReviewListView.as_view(), name=\"review-asset-list\"),\n path(\"account/ajax-status/\", views.ajax_session_status, name=\"ajax-session-status\"),\n path(\"account/ajax-messages/\", views.ajax_messages, name=\"ajax-messages\"),\n path(\n \"account/register/\",\n views.ConcordiaRegistrationView.as_view(),\n name=\"registration_register\",\n ),\n path(\n \"account/login/\", views.ConcordiaLoginView.as_view(), name=\"registration_login\"\n ),\n path(\"account/profile/\", views.AccountProfileView.as_view(), name=\"user-profile\"),\n path(\n \"account/password_reset/\",\n views.ConcordiaPasswordResetRequestView.as_view(),\n name=\"password_reset\",\n ),\n path(\n \"account/reset/<uidb64>/<token>/\",\n views.ConcordiaPasswordResetConfirmView.as_view(),\n name=\"password_reset_confirm\",\n ),\n path(\"account/\", include(\"django_registration.backends.activation.urls\")),\n path(\"account/\", include(\"django.contrib.auth.urls\")),\n path(\n \".well-known/change-password\", # https://wicg.github.io/change-password-url/\n RedirectView.as_view(pattern_name=\"password_change\"),\n ),\n path(\"captcha/ajax/\", views.ajax_captcha, name=\"ajax-captcha\"),\n path(\"captcha/\", include(\"captcha.urls\")),\n path(\"admin/\", admin.site.urls),\n # Internal support assists:\n path(\"error/500/\", server_error),\n path(\"error/404/\", page_not_found, {\"exception\": Http404()}),\n path(\"error/429/\", views.ratelimit_view),\n path(\"error/403/\", permission_denied, {\"exception\": HttpResponseForbidden()}),\n url(\"\", 
include(\"django_prometheus_metrics.urls\")),\n path(\"robots.txt\", include(\"robots.urls\")),\n]\n\nif settings.DEBUG:\n import debug_toolbar\n from django.conf.urls.static import static\n\n urlpatterns = [path(\"__debug__/\", include(debug_toolbar.urls))] + urlpatterns\n\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n",
"path": "concordia/urls.py"
}
] | [
{
"content": "from django.conf import settings\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.http import Http404, HttpResponseForbidden\nfrom django.urls import include, path\nfrom django.urls.converters import register_converter\nfrom django.views.defaults import page_not_found, permission_denied, server_error\nfrom django.views.generic import RedirectView\n\nfrom exporter import views as exporter_views\n\nfrom . import converters, views\n\nregister_converter(converters.UnicodeSlugConverter, \"uslug\")\nregister_converter(converters.ItemIdConverter, \"item_id\")\n\ntx_urlpatterns = (\n [\n path(\"\", views.CampaignListView.as_view(), name=\"campaign-list\"),\n path(\n \"<uslug:slug>/\", views.CampaignDetailView.as_view(), name=\"campaign-detail\"\n ),\n path(\n \"<uslug:campaign_slug>/export/csv/\",\n exporter_views.ExportCampaignToCSV.as_view(),\n name=\"campaign-export-csv\",\n ),\n path(\n \"<uslug:campaign_slug>/export/bagit/\",\n exporter_views.ExportCampaignToBagIt.as_view(),\n name=\"campaign-export-bagit\",\n ),\n path(\n \"<uslug:campaign_slug>/<uslug:project_slug>/export/bagit/\",\n exporter_views.ExportProjectToBagIt.as_view(),\n name=\"project-export-bagit\",\n ),\n path(\n (\n \"<uslug:campaign_slug>/<uslug:project_slug>/\"\n \"<item_id:item_id>/export/bagit/\"\n ),\n exporter_views.ExportItemToBagIt.as_view(),\n name=\"item-export-bagit\",\n ),\n path(\n \"<uslug:campaign_slug>/report/\",\n views.ReportCampaignView.as_view(),\n name=\"campaign-report\",\n ),\n path(\n (\n \"<uslug:campaign_slug>/<uslug:project_slug>/\"\n \"<item_id:item_id>/<uslug:slug>/\"\n ),\n views.AssetDetailView.as_view(),\n name=\"asset-detail\",\n ),\n # n.b. this must be above project-detail to avoid being seen as a project slug:\n path(\n \"<uslug:campaign_slug>/next-transcribable-asset/\",\n views.redirect_to_next_transcribable_campaign_asset,\n name=\"redirect-to-next-transcribable-campaign-asset\",\n ),\n path(\n \"<uslug:campaign_slug>/next-reviewable-asset/\",\n views.redirect_to_next_reviewable_campaign_asset,\n name=\"redirect-to-next-reviewable-campaign-asset\",\n ),\n path(\n \"<uslug:campaign_slug>/<uslug:slug>/\",\n views.ProjectDetailView.as_view(),\n name=\"project-detail\",\n ),\n path(\n \"<uslug:campaign_slug>/<uslug:project_slug>/<item_id:item_id>/\",\n views.ItemDetailView.as_view(),\n name=\"item-detail\",\n ),\n ],\n \"transcriptions\",\n)\n\nurlpatterns = [\n path(\"\", views.HomeView.as_view(), name=\"homepage\"),\n path(\"healthz\", views.healthz, name=\"health-check\"),\n path(\"about/\", views.simple_page, name=\"about\"),\n path(\"help-center/\", views.simple_page, name=\"help-center\"),\n path(\"help-center/welcome-guide/\", views.simple_page, name=\"welcome-guide\"),\n path(\"help-center/how-to-transcribe/\", views.simple_page, name=\"how-to-transcribe\"),\n path(\"help-center/how-to-review/\", views.simple_page, name=\"how-to-review\"),\n path(\"help-center/how-to-tag/\", views.simple_page, name=\"how-to-tag\"),\n path(\n \"help-center/welcome-guide-esp/\",\n views.simple_page,\n name=\"welcome-guide-spanish\",\n ),\n path(\n \"help-center/how-to-transcribe-esp/\",\n views.simple_page,\n name=\"how-to-transcribe-spanish\",\n ),\n path(\n \"help-center/how-to-review-esp/\",\n views.simple_page,\n name=\"how-to-review-spanish\",\n ),\n path(\"help-center/how-to-tag-esp/\", views.simple_page, name=\"how-to-tag-spanish\"),\n path(\"for-educators/\", views.simple_page, name=\"for-educators\"),\n path(\"for-staff/\", views.simple_page, 
name=\"for-staff\"),\n path(\"resources/\", views.simple_page, name=\"resources\"),\n path(\n \"latest/\",\n RedirectView.as_view(pattern_name=\"about\", permanent=True, query_string=True),\n ),\n path(\"questions/\", views.simple_page, name=\"questions\"),\n path(\"contact/\", views.ContactUsView.as_view(), name=\"contact\"),\n path(\"act/\", views.action_app, name=\"action-app\"),\n path(\n \"campaigns-topics/\",\n views.CampaignTopicListView.as_view(),\n name=\"campaign-topic-list\",\n ),\n path(\"topics/\", views.TopicListView.as_view(), name=\"topic-list\"),\n path(\"topics/<uslug:slug>/\", views.TopicDetailView.as_view(), name=\"topic-detail\"),\n path(\n \"topics/<uslug:topic_slug>/next-transcribable-asset/\",\n views.redirect_to_next_transcribable_topic_asset,\n name=\"redirect-to-next-transcribable-topic-asset\",\n ),\n path(\n \"topics/<uslug:topic_slug>/next-reviewable-asset/\",\n views.redirect_to_next_reviewable_topic_asset,\n name=\"redirect-to-next-reviewable-topic-asset\",\n ),\n path(\n \"next-transcribable-asset/\",\n views.redirect_to_next_transcribable_asset,\n name=\"redirect-to-next-transcribable-asset\",\n ),\n path(\n \"next-reviewable-asset/\",\n views.redirect_to_next_reviewable_asset,\n name=\"redirect-to-next-reviewable-asset\",\n ),\n path(\"campaigns/\", include(tx_urlpatterns, namespace=\"transcriptions\")),\n path(\"reserve-asset/<int:asset_pk>/\", views.reserve_asset, name=\"reserve-asset\"),\n path(\n \"assets/<int:asset_pk>/transcriptions/save/\",\n views.save_transcription,\n name=\"save-transcription\",\n ),\n path(\n \"transcriptions/<int:pk>/submit/\",\n views.submit_transcription,\n name=\"submit-transcription\",\n ),\n path(\n \"transcriptions/<int:pk>/review/\",\n views.review_transcription,\n name=\"review-transcription\",\n ),\n path(\"assets/<int:asset_pk>/tags/submit/\", views.submit_tags, name=\"submit-tags\"),\n path(\"assets/\", views.AssetListView.as_view(), name=\"asset-list\"),\n path(\n \"transcribe/\", views.TranscribeListView.as_view(), name=\"transcribe-asset-list\"\n ),\n path(\"review/\", views.ReviewListView.as_view(), name=\"review-asset-list\"),\n path(\"account/ajax-status/\", views.ajax_session_status, name=\"ajax-session-status\"),\n path(\"account/ajax-messages/\", views.ajax_messages, name=\"ajax-messages\"),\n path(\n \"account/register/\",\n views.ConcordiaRegistrationView.as_view(),\n name=\"registration_register\",\n ),\n path(\n \"account/login/\", views.ConcordiaLoginView.as_view(), name=\"registration_login\"\n ),\n path(\"account/profile/\", views.AccountProfileView.as_view(), name=\"user-profile\"),\n path(\n \"account/password_reset/\",\n views.ConcordiaPasswordResetRequestView.as_view(),\n name=\"password_reset\",\n ),\n path(\n \"account/reset/<uidb64>/<token>/\",\n views.ConcordiaPasswordResetConfirmView.as_view(),\n name=\"password_reset_confirm\",\n ),\n path(\"account/\", include(\"django_registration.backends.activation.urls\")),\n path(\"account/\", include(\"django.contrib.auth.urls\")),\n path(\n \".well-known/change-password\", # https://wicg.github.io/change-password-url/\n RedirectView.as_view(pattern_name=\"password_change\"),\n ),\n path(\"captcha/ajax/\", views.ajax_captcha, name=\"ajax-captcha\"),\n path(\"captcha/\", include(\"captcha.urls\")),\n path(\"admin/\", admin.site.urls),\n # Internal support assists:\n path(\"error/500/\", server_error),\n path(\"error/404/\", page_not_found, {\"exception\": Http404()}),\n path(\"error/429/\", views.ratelimit_view),\n path(\"error/403/\", 
permission_denied, {\"exception\": HttpResponseForbidden()}),\n url(\"\", include(\"django_prometheus_metrics.urls\")),\n path(\"robots.txt\", include(\"robots.urls\")),\n]\n\nif settings.DEBUG:\n import debug_toolbar\n from django.conf.urls.static import static\n\n urlpatterns = [path(\"__debug__/\", include(debug_toolbar.urls))] + urlpatterns\n\n urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n",
"path": "concordia/urls.py"
}
] | diff --git a/concordia/urls.py b/concordia/urls.py
index 748323883..61acfedcd 100644
--- a/concordia/urls.py
+++ b/concordia/urls.py
@@ -107,6 +107,7 @@
),
path("help-center/how-to-tag-esp/", views.simple_page, name="how-to-tag-spanish"),
path("for-educators/", views.simple_page, name="for-educators"),
+ path("for-staff/", views.simple_page, name="for-staff"),
path("resources/", views.simple_page, name="resources"),
path(
"latest/",
| Create URL path for staff page
We will create a simple page for staff and need to add the corresponding URL path.
For example: `crowd.loc.gov/for-staff`
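As a rough illustration (assuming Django's `reverse()` and the `for-staff` route name from the diff above), the new path could be exercised like this:

```python
# Hypothetical check, e.g. inside a Django test case for concordia.urls.
from django.urls import reverse

def test_for_staff_route_resolves():
    # The route is registered at the site root, so the named URL
    # "for-staff" should resolve to /for-staff/.
    assert reverse("for-staff") == "/for-staff/"
```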
|
spack__spack-23320 | [
{
"content": "# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nimport re\n\nfrom spack import *\n\n\nclass Bzip2(Package, SourcewarePackage):\n \"\"\"bzip2 is a freely available, patent free high-quality data\n compressor. It typically compresses files to within 10% to 15%\n of the best available techniques (the PPM family of statistical\n compressors), whilst being around twice as fast at compression\n and six times faster at decompression.\"\"\"\n\n homepage = \"https://sourceware.org/bzip2/\"\n sourceware_mirror_path = \"bzip2/bzip2-1.0.8.tar.gz\"\n\n executables = [r'^bzip2$']\n\n version('1.0.8', sha256='ab5a03176ee106d3f0fa90e381da478ddae405918153cca248e682cd0c4a2269')\n version('1.0.7', sha256='e768a87c5b1a79511499beb41500bcc4caf203726fff46a6f5f9ad27fe08ab2b')\n version('1.0.6', sha256='a2848f34fcd5d6cf47def00461fcb528a0484d8edef8208d6d2e2909dc61d9cd')\n\n variant('shared', default=True, description='Enables the build of shared libraries.')\n variant('pic', default=False, description='Build static libraries with PIC')\n variant('debug', default=False, description='Enable debug symbols and disable optimization')\n\n depends_on('diffutils', type='build')\n\n @classmethod\n def determine_version(cls, exe):\n output = Executable(exe)('--help', output=str, error=str)\n match = re.search(r'bzip2, a block-sorting file compressor.'\n ' Version ([^,]+)', output)\n return match.group(1) if match else None\n\n # override default implementation\n @property\n def libs(self):\n shared = '+shared' in self.spec\n return find_libraries(\n 'libbz2', root=self.prefix, shared=shared, recursive=True\n )\n\n def flag_handler(self, name, flags):\n if name == 'cflags':\n if '+pic' in self.spec:\n flags.append(self.compiler.cc_pic_flag)\n if '+debug' in self.spec:\n flags.append('-g')\n return(flags, None, None)\n\n def patch(self):\n if self.spec.satisfies('+debug'):\n for makefile in ['Makefile', 'Makefile-libbz2_so']:\n filter_file(r'-O ', '-O0 ', makefile)\n filter_file(r'-O2 ', '-O0 ', makefile)\n\n # bzip2 comes with two separate Makefiles for static and dynamic builds\n # Tell both to use Spack's compiler wrapper instead of GCC\n filter_file(r'^CC=gcc', 'CC={0}'.format(spack_cc), 'Makefile')\n filter_file(\n r'^CC=gcc', 'CC={0}'.format(spack_cc), 'Makefile-libbz2_so'\n )\n\n # The Makefiles use GCC flags that are incompatible with PGI\n if self.spec.satisfies('%pgi') or self.spec.satisfies('%nvhpc@:20.11'):\n filter_file('-Wall -Winline', '-Minform=inform', 'Makefile')\n filter_file('-Wall -Winline', '-Minform=inform',\n 'Makefile-libbz2_so')\n\n # Patch the link line to use RPATHs on macOS\n if 'darwin' in self.spec.architecture:\n v = self.spec.version\n v1, v2, v3 = (v.up_to(i) for i in (1, 2, 3))\n\n kwargs = {'ignore_absent': False, 'backup': False, 'string': True}\n\n mf = FileFilter('Makefile-libbz2_so')\n mf.filter('$(CC) -shared -Wl,-soname -Wl,libbz2.so.{0} -o libbz2.so.{1} $(OBJS)' # noqa\n .format(v2, v3),\n '$(CC) -dynamiclib -Wl,-install_name -Wl,@rpath/libbz2.{0}.dylib -current_version {1} -compatibility_version {2} -o libbz2.{3}.dylib $(OBJS)' # noqa\n .format(v1, v2, v3, v3),\n **kwargs)\n\n mf.filter(\n '$(CC) $(CFLAGS) -o bzip2-shared bzip2.c libbz2.so.{0}'.format(v3), # noqa\n '$(CC) $(CFLAGS) -o bzip2-shared bzip2.c libbz2.{0}.dylib'\n .format(v3), **kwargs)\n mf.filter(\n 'rm -f libbz2.so.{0}'.format(v2),\n 'rm -f 
libbz2.{0}.dylib'.format(v2), **kwargs)\n mf.filter(\n 'ln -s libbz2.so.{0} libbz2.so.{1}'.format(v3, v2),\n 'ln -s libbz2.{0}.dylib libbz2.{1}.dylib'.format(v3, v2),\n **kwargs)\n\n def install(self, spec, prefix):\n # Build the dynamic library first\n if '+shared' in spec:\n make('-f', 'Makefile-libbz2_so')\n\n # Build the static library and everything else\n make()\n make('install', 'PREFIX={0}'.format(prefix))\n\n if '+shared' in spec:\n install('bzip2-shared', join_path(prefix.bin, 'bzip2'))\n\n v1, v2, v3 = (self.spec.version.up_to(i) for i in (1, 2, 3))\n if 'darwin' in self.spec.architecture:\n lib = 'libbz2.dylib'\n lib1, lib2, lib3 = ('libbz2.{0}.dylib'.format(v)\n for v in (v1, v2, v3))\n else:\n lib = 'libbz2.so'\n lib1, lib2, lib3 = ('libbz2.so.{0}'.format(v)\n for v in (v1, v2, v3))\n\n install(lib3, join_path(prefix.lib, lib3))\n with working_dir(prefix.lib):\n for libname in (lib, lib1, lib2):\n symlink(lib3, libname)\n\n with working_dir(prefix.bin):\n force_remove('bunzip2', 'bzcat')\n symlink('bzip2', 'bunzip2')\n symlink('bzip2', 'bzcat')\n",
"path": "var/spack/repos/builtin/packages/bzip2/package.py"
}
] | [
{
"content": "# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other\n# Spack Project Developers. See the top-level COPYRIGHT file for details.\n#\n# SPDX-License-Identifier: (Apache-2.0 OR MIT)\n\nimport re\n\nfrom spack import *\n\n\nclass Bzip2(Package, SourcewarePackage):\n \"\"\"bzip2 is a freely available, patent free high-quality data\n compressor. It typically compresses files to within 10% to 15%\n of the best available techniques (the PPM family of statistical\n compressors), whilst being around twice as fast at compression\n and six times faster at decompression.\"\"\"\n\n homepage = \"https://sourceware.org/bzip2/\"\n sourceware_mirror_path = \"bzip2/bzip2-1.0.8.tar.gz\"\n\n executables = [r'^bzip2$']\n\n version('1.0.8', sha256='ab5a03176ee106d3f0fa90e381da478ddae405918153cca248e682cd0c4a2269')\n version('1.0.7', sha256='e768a87c5b1a79511499beb41500bcc4caf203726fff46a6f5f9ad27fe08ab2b')\n version('1.0.6', sha256='a2848f34fcd5d6cf47def00461fcb528a0484d8edef8208d6d2e2909dc61d9cd')\n\n variant('shared', default=True, description='Enables the build of shared libraries.')\n variant('pic', default=False, description='Build static libraries with PIC')\n variant('debug', default=False, description='Enable debug symbols and disable optimization')\n\n depends_on('diffutils', type='build')\n\n @classmethod\n def determine_version(cls, exe):\n output = Executable(exe)('--help', output=str, error=str)\n match = re.search(r'bzip2, a block-sorting file compressor.'\n ' Version ([^,]+)', output)\n return match.group(1) if match else None\n\n # override default implementation\n @property\n def libs(self):\n shared = '+shared' in self.spec\n return find_libraries(\n 'libbz2', root=self.prefix, shared=shared, recursive=True\n )\n\n def flag_handler(self, name, flags):\n if name == 'cflags':\n if '+pic' in self.spec:\n flags.append(self.compiler.cc_pic_flag)\n if '+debug' in self.spec:\n flags.append('-g')\n return(flags, None, None)\n\n def patch(self):\n if self.spec.satisfies('+debug'):\n for makefile in ['Makefile', 'Makefile-libbz2_so']:\n filter_file(r'-O ', '-O0 ', makefile)\n filter_file(r'-O2 ', '-O0 ', makefile)\n\n # bzip2 comes with two separate Makefiles for static and dynamic builds\n # Tell both to use Spack's compiler wrapper instead of GCC\n filter_file(r'^CC=gcc', 'CC={0}'.format(spack_cc), 'Makefile')\n filter_file(\n r'^CC=gcc', 'CC={0}'.format(spack_cc), 'Makefile-libbz2_so'\n )\n\n # The Makefiles use GCC flags that are incompatible with PGI\n if self.spec.satisfies('%pgi') or self.spec.satisfies('%nvhpc@:20.11'):\n filter_file('-Wall -Winline', '-Minform=inform', 'Makefile')\n filter_file('-Wall -Winline', '-Minform=inform',\n 'Makefile-libbz2_so')\n\n # Patch the link line to use RPATHs on macOS\n if 'darwin' in self.spec.architecture:\n v = self.spec.version\n v1, v2, v3 = (v.up_to(i) for i in (1, 2, 3))\n\n kwargs = {'ignore_absent': False, 'backup': False, 'string': True}\n\n mf = FileFilter('Makefile-libbz2_so')\n mf.filter('$(CC) -shared -Wl,-soname -Wl,libbz2.so.{0} -o libbz2.so.{1} $(OBJS)' # noqa\n .format(v2, v3),\n '$(CC) -dynamiclib -Wl,-install_name -Wl,@rpath/libbz2.{0}.dylib -current_version {1} -compatibility_version {2} -o libbz2.{3}.dylib $(OBJS)' # noqa\n .format(v1, v2, v3, v3),\n **kwargs)\n\n mf.filter(\n '$(CC) $(CFLAGS) -o bzip2-shared bzip2.c libbz2.so.{0}'.format(v3), # noqa\n '$(CC) $(CFLAGS) -o bzip2-shared bzip2.c libbz2.{0}.dylib'\n .format(v3), **kwargs)\n mf.filter(\n 'rm -f libbz2.so.{0}'.format(v2),\n 'rm -f 
libbz2.{0}.dylib'.format(v2), **kwargs)\n mf.filter(\n 'ln -s libbz2.so.{0} libbz2.so.{1}'.format(v3, v2),\n 'ln -s libbz2.{0}.dylib libbz2.{1}.dylib'.format(v3, v2),\n **kwargs)\n\n def install(self, spec, prefix):\n # Build the dynamic library first\n if '+shared' in spec:\n make('-f', 'Makefile-libbz2_so')\n\n # Build the static library and everything else\n make()\n make('install', 'PREFIX={0}'.format(prefix))\n\n if '+shared' in spec:\n install('bzip2-shared', join_path(prefix.bin, 'bzip2'))\n\n v1, v2, v3 = (self.spec.version.up_to(i) for i in (1, 2, 3))\n if 'darwin' in self.spec.architecture:\n lib = 'libbz2.dylib'\n lib1, lib2, lib3 = ('libbz2.{0}.dylib'.format(v)\n for v in (v1, v2, v3))\n else:\n lib = 'libbz2.so'\n lib1, lib2, lib3 = ('libbz2.so.{0}'.format(v)\n for v in (v1, v2, v3))\n\n install(lib3, join_path(prefix.lib, lib3))\n with working_dir(prefix.lib):\n for libname in (lib, lib1, lib2):\n symlink(lib3, libname)\n\n with working_dir(prefix.bin):\n force_remove('bunzip2', 'bzcat')\n symlink('bzip2', 'bunzip2')\n symlink('bzip2', 'bzcat')\n",
"path": "var/spack/repos/builtin/packages/bzip2/package.py"
}
] | diff --git a/var/spack/repos/builtin/packages/bzip2/package.py b/var/spack/repos/builtin/packages/bzip2/package.py
index 437dc14dec2ae6..f3211d1c42b22b 100644
--- a/var/spack/repos/builtin/packages/bzip2/package.py
+++ b/var/spack/repos/builtin/packages/bzip2/package.py
@@ -54,7 +54,7 @@ def flag_handler(self, name, flags):
return(flags, None, None)
def patch(self):
- if spec.satisfies('+debug'):
+ if self.spec.satisfies('+debug'):
for makefile in ['Makefile', 'Makefile-libbz2_so']:
filter_file(r'-O ', '-O0 ', makefile)
filter_file(r'-O2 ', '-O0 ', makefile)
| Installation issue: bzip2: python error in package.py
#23230 appears to have introduced a typo (a Python `NameError`) in package.py.
### Steps to reproduce the issue
```console
$ spack install bzip2
...
==> Installing bzip2-1.0.8-4efigg64jltb6topl5suvz4dmpvupmei
==> No binary for bzip2-1.0.8-4efigg64jltb6topl5suvz4dmpvupmei found: installing from source
==> Warning: included configuration files should be updated manually [files=/software/spack/dev-environments/gcc840/packages-gcc840.yaml, /software/spack/dev-environments/common/packages-common.yaml]
==> Using cached archive: /software/spack/git.2021.04.28/var/spack/cache/_source-cache/archive/ab/ab5a03176ee106d3f0fa90e381da478ddae405918153cca248e682cd0c4a2269.tar.gz
==> Error: NameError: name 'spec' is not defined
/software/spack/git.2021.04.28/var/spack/repos/builtin/packages/bzip2/package.py:57, in patch:
56 def patch(self):
>> 57 if spec.satisfies('+debug'):
58 for makefile in ['Makefile', 'Makefile-libbz2_so']:
59 filter_file(r'-O ', '-O0 ', makefile)
60 filter_file(r'-O2 ', '-O0 ', makefile)
...
```
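The traceback comes down to how Spack build methods are invoked: `patch()` receives only `self`, so the spec has to be reached as `self.spec` (unlike `install(self, spec, prefix)`, where the spec is passed in as an argument). A minimal sketch of the pattern, not the full package:

```python
# Sketch only -- illustrates why the fix changes `spec` to `self.spec`.
class Bzip2(Package):
    def patch(self):
        # No `spec` parameter here, so the bare name raises NameError;
        # the spec is available as an attribute of the package instance.
        if self.spec.satisfies('+debug'):
            pass  # ... filter the Makefiles ...

    def install(self, spec, prefix):
        # Here `spec` *is* a parameter, which is why the shorthand works
        # in install() but not in patch().
        pass
```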
### Information on your system
```console
$ spack debug report
* **Spack:** 0.16.1-2429-f5e6c32495
* **Python:** 3.6.8
* **Platform:** linux-rhel8-x86_64
* **Concretizer:** original
```
### Additional information
The build does not reach the point of creating spack-build-out.txt, etc.
There are no maintainers; I believe the issue was introduced by @scheibelp in #23230.
### General information
- [x] I have run `spack debug report` and reported the version of Spack/Python/Platform
- [x] I have run `spack maintainers <name-of-the-package>` and @mentioned any maintainers
- [x] I have uploaded the build log and environment files (not applicable; none generated)
- [x] I have searched the issues of this repo and believe this is not a duplicate
|
ranaroussi__yfinance-1257 | [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# yfinance - market data downloader\n# https://github.com/ranaroussi/yfinance\n#\n# Copyright 2017-2019 Ran Aroussi\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom __future__ import print_function\n\nimport datetime as _datetime\nimport pytz as _tz\nimport requests as _requests\nimport re as _re\nimport pandas as _pd\nimport numpy as _np\nimport sys as _sys\nimport os as _os\nimport appdirs as _ad\n\nfrom base64 import b64decode\nimport hashlib\nusePycryptodome = False # slightly faster\n# usePycryptodome = True\nif usePycryptodome:\n # NOTE: if decide to use 'pycryptodome', set min version to 3.6.6\n from Crypto.Cipher import AES\n from Crypto.Util.Padding import unpad\nelse:\n from cryptography.hazmat.primitives import padding\n from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\n\nfrom threading import Lock\nmutex = Lock()\n\ntry:\n import ujson as _json\nexcept ImportError:\n import json as _json\n\n\nuser_agent_headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n\n\ndef is_isin(string):\n return bool(_re.match(\"^([A-Z]{2})([A-Z0-9]{9})([0-9]{1})$\", string))\n\n\ndef get_all_by_isin(isin, proxy=None, session=None):\n if not(is_isin(isin)):\n raise ValueError(\"Invalid ISIN number\")\n\n from .base import _BASE_URL_\n session = session or _requests\n url = \"{}/v1/finance/search?q={}\".format(_BASE_URL_, isin)\n data = session.get(url=url, proxies=proxy, headers=user_agent_headers)\n try:\n data = data.json()\n ticker = data.get('quotes', [{}])[0]\n return {\n 'ticker': {\n 'symbol': ticker['symbol'],\n 'shortname': ticker['shortname'],\n 'longname': ticker['longname'],\n 'type': ticker['quoteType'],\n 'exchange': ticker['exchDisp'],\n },\n 'news': data.get('news', [])\n }\n except Exception:\n return {}\n\n\ndef get_ticker_by_isin(isin, proxy=None, session=None):\n data = get_all_by_isin(isin, proxy, session)\n return data.get('ticker', {}).get('symbol', '')\n\n\ndef get_info_by_isin(isin, proxy=None, session=None):\n data = get_all_by_isin(isin, proxy, session)\n return data.get('ticker', {})\n\n\ndef get_news_by_isin(isin, proxy=None, session=None):\n data = get_all_by_isin(isin, proxy, session)\n return data.get('news', {})\n\n\ndef empty_df(index=[]):\n empty = _pd.DataFrame(index=index, data={\n 'Open': _np.nan, 'High': _np.nan, 'Low': _np.nan,\n 'Close': _np.nan, 'Adj Close': _np.nan, 'Volume': _np.nan})\n empty.index.name = 'Date'\n return empty\n\n\ndef empty_earnings_dates_df():\n empty = _pd.DataFrame(\n columns=[\"Symbol\", \"Company\", \"Earnings Date\",\n \"EPS Estimate\", \"Reported EPS\", \"Surprise(%)\"])\n return empty\n\n\ndef get_html(url, proxy=None, session=None):\n session = session or _requests\n html = session.get(url=url, proxies=proxy, headers=user_agent_headers).text\n return html\n\n\n\ndef decrypt_cryptojs_stores(data):\n \"\"\"\n Yahoo has started 
encrypting data stores, this method decrypts it.\n :param data: Python dict of the json data\n :return: The decrypted string data in data['context']['dispatcher']['stores']\n \"\"\"\n\n _cs = data[\"_cs\"]\n # Assumes _cr has format like: '{\"words\":[-449732894,601032952,157396918,2056341829],\"sigBytes\":16}';\n _cr = _json.loads(data[\"_cr\"])\n _cr = b\"\".join(int.to_bytes(i, length=4, byteorder=\"big\", signed=True) for i in _cr[\"words\"])\n\n password = hashlib.pbkdf2_hmac(\"sha1\", _cs.encode(\"utf8\"), _cr, 1, dklen=32).hex()\n\n encrypted_stores = data['context']['dispatcher']['stores']\n encrypted_stores = b64decode(encrypted_stores)\n assert encrypted_stores[0:8] == b\"Salted__\"\n salt = encrypted_stores[8:16]\n encrypted_stores = encrypted_stores[16:]\n\n key, iv = _EVPKDF(password, salt, keySize=32, ivSize=16, iterations=1, hashAlgorithm=\"md5\")\n\n if usePycryptodome:\n cipher = AES.new(key, AES.MODE_CBC, iv=iv)\n plaintext = cipher.decrypt(encrypted_stores)\n plaintext = unpad(plaintext, 16, style=\"pkcs7\")\n else:\n cipher = Cipher(algorithms.AES(key), modes.CBC(iv))\n decryptor = cipher.decryptor()\n plaintext = decryptor.update(encrypted_stores) + decryptor.finalize()\n unpadder = padding.PKCS7(128).unpadder()\n plaintext = unpadder.update(plaintext) + unpadder.finalize()\n plaintext = plaintext.decode(\"utf-8\")\n\n return plaintext\n\ndef _EVPKDF(password, salt, keySize=32, ivSize=16, iterations=1, hashAlgorithm=\"md5\") -> tuple:\n \"\"\"OpenSSL EVP Key Derivation Function\n Args:\n password (Union[str, bytes, bytearray]): Password to generate key from.\n salt (Union[bytes, bytearray]): Salt to use.\n keySize (int, optional): Output key length in bytes. Defaults to 32.\n ivSize (int, optional): Output Initialization Vector (IV) length in bytes. Defaults to 16.\n iterations (int, optional): Number of iterations to perform. Defaults to 1.\n hashAlgorithm (str, optional): Hash algorithm to use for the KDF. Defaults to 'md5'.\n Returns:\n key, iv: Derived key and Initialization Vector (IV) bytes.\n\n Taken from: https://gist.github.com/rafiibrahim8/0cd0f8c46896cafef6486cb1a50a16d3\n OpenSSL original code: https://github.com/openssl/openssl/blob/master/crypto/evp/evp_key.c#L78\n \"\"\"\n\n assert iterations > 0, \"Iterations can not be less than 1.\"\n\n if isinstance(password, str):\n password = password.encode(\"utf-8\")\n\n final_length = keySize + ivSize\n key_iv = b\"\"\n block = None\n\n while len(key_iv) < final_length:\n hasher = hashlib.new(hashAlgorithm)\n if block:\n hasher.update(block)\n hasher.update(password)\n hasher.update(salt)\n block = hasher.digest()\n for _ in range(1, iterations):\n block = hashlib.new(hashAlgorithm, block).digest()\n key_iv += block\n\n key, iv = key_iv[:keySize], key_iv[keySize:final_length]\n return key, iv\n\n\ndef get_json(url, proxy=None, session=None):\n session = session or _requests\n html = session.get(url=url, proxies=proxy, headers=user_agent_headers).text\n\n json_str = html.split('root.App.main =')[1].split(\n '(this)')[0].split(';\\n}')[0].strip()\n data = _json.loads(json_str)\n\n if \"_cs\" in data and \"_cr\" in data:\n data_stores = _json.loads(decrypt_cryptojs_stores(data))\n else:\n if \"context\" in data and \"dispatcher\" in data[\"context\"]:\n # Keep old code, just in case\n data_stores = data['context']['dispatcher']['stores']\n else:\n data_stores = data\n\n if not 'QuoteSummaryStore' in data_stores:\n # Problem in data. 
Either delisted, or Yahoo spam triggered\n return {}\n\n data = data_stores['QuoteSummaryStore']\n # add data about Shares Outstanding for companies' tickers if they are available\n try:\n data['annualBasicAverageShares'] = \\\n data_stores['QuoteTimeSeriesStore']['timeSeries']['annualBasicAverageShares']\n except Exception:\n pass\n\n # return data\n new_data = _json.dumps(data).replace('{}', 'null')\n new_data = _re.sub(\n r'\\{[\\'|\\\"]raw[\\'|\\\"]:(.*?),(.*?)\\}', r'\\1', new_data)\n\n return _json.loads(new_data)\n\n\ndef camel2title(o):\n return [_re.sub(\"([a-z])([A-Z])\", r\"\\g<1> \\g<2>\", i).title() for i in o]\n\n\ndef _parse_user_dt(dt, exchange_tz):\n if isinstance(dt, int):\n ## Should already be epoch, test with conversion:\n _datetime.datetime.fromtimestamp(dt)\n else:\n # Convert str/date -> datetime, set tzinfo=exchange, get timestamp:\n if isinstance(dt, str):\n dt = _datetime.datetime.strptime(str(dt), '%Y-%m-%d')\n if isinstance(dt, _datetime.date) and not isinstance(dt, _datetime.datetime):\n dt = _datetime.datetime.combine(dt, _datetime.time(0))\n if isinstance(dt, _datetime.datetime) and dt.tzinfo is None:\n # Assume user is referring to exchange's timezone\n dt = _tz.timezone(exchange_tz).localize(dt)\n dt = int(dt.timestamp())\n return dt\n\n\ndef auto_adjust(data):\n df = data.copy()\n ratio = df[\"Close\"] / df[\"Adj Close\"]\n df[\"Adj Open\"] = df[\"Open\"] / ratio\n df[\"Adj High\"] = df[\"High\"] / ratio\n df[\"Adj Low\"] = df[\"Low\"] / ratio\n\n df.drop(\n [\"Open\", \"High\", \"Low\", \"Close\"],\n axis=1, inplace=True)\n\n df.rename(columns={\n \"Adj Open\": \"Open\", \"Adj High\": \"High\",\n \"Adj Low\": \"Low\", \"Adj Close\": \"Close\"\n }, inplace=True)\n\n df = df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n return df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n\n\ndef back_adjust(data):\n \"\"\" back-adjusted data to mimic true historical prices \"\"\"\n\n df = data.copy()\n ratio = df[\"Adj Close\"] / df[\"Close\"]\n df[\"Adj Open\"] = df[\"Open\"] * ratio\n df[\"Adj High\"] = df[\"High\"] * ratio\n df[\"Adj Low\"] = df[\"Low\"] * ratio\n\n df.drop(\n [\"Open\", \"High\", \"Low\", \"Adj Close\"],\n axis=1, inplace=True)\n\n df.rename(columns={\n \"Adj Open\": \"Open\", \"Adj High\": \"High\",\n \"Adj Low\": \"Low\"\n }, inplace=True)\n\n return df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n\n\ndef parse_quotes(data):\n timestamps = data[\"timestamp\"]\n ohlc = data[\"indicators\"][\"quote\"][0]\n volumes = ohlc[\"volume\"]\n opens = ohlc[\"open\"]\n closes = ohlc[\"close\"]\n lows = ohlc[\"low\"]\n highs = ohlc[\"high\"]\n\n adjclose = closes\n if \"adjclose\" in data[\"indicators\"]:\n adjclose = data[\"indicators\"][\"adjclose\"][0][\"adjclose\"]\n\n quotes = _pd.DataFrame({\"Open\": opens,\n \"High\": highs,\n \"Low\": lows,\n \"Close\": closes,\n \"Adj Close\": adjclose,\n \"Volume\": volumes})\n\n quotes.index = _pd.to_datetime(timestamps, unit=\"s\")\n quotes.sort_index(inplace=True)\n\n return quotes\n\n\ndef parse_actions(data):\n dividends = _pd.DataFrame(\n columns=[\"Dividends\"], index=_pd.DatetimeIndex([]))\n splits = _pd.DataFrame(\n columns=[\"Stock Splits\"], index=_pd.DatetimeIndex([]))\n\n if \"events\" in data:\n if \"dividends\" in data[\"events\"]:\n dividends = _pd.DataFrame(\n data=list(data[\"events\"][\"dividends\"].values()))\n dividends.set_index(\"date\", inplace=True)\n dividends.index = _pd.to_datetime(dividends.index, unit=\"s\")\n dividends.sort_index(inplace=True)\n\n 
dividends.columns = [\"Dividends\"]\n\n if \"splits\" in data[\"events\"]:\n splits = _pd.DataFrame(\n data=list(data[\"events\"][\"splits\"].values()))\n splits.set_index(\"date\", inplace=True)\n splits.index = _pd.to_datetime(splits.index, unit=\"s\")\n splits.sort_index(inplace=True)\n splits[\"Stock Splits\"] = splits[\"numerator\"] / \\\n splits[\"denominator\"]\n splits = splits[\"Stock Splits\"]\n\n return dividends, splits\n\n\ndef fix_Yahoo_dst_issue(df, interval):\n if interval in [\"1d\",\"1w\",\"1wk\"]:\n # These intervals should start at time 00:00. But for some combinations of date and timezone, \n # Yahoo has time off by few hours (e.g. Brazil 23:00 around Jan-2022). Suspect DST problem.\n # The clue is (a) minutes=0 and (b) hour near 0. \n # Obviously Yahoo meant 00:00, so ensure this doesn't affect date conversion:\n f_pre_midnight = (df.index.minute == 0) & (df.index.hour.isin([22,23]))\n dst_error_hours = _np.array([0]*df.shape[0])\n dst_error_hours[f_pre_midnight] = 24-df.index[f_pre_midnight].hour\n df.index += _pd.TimedeltaIndex(dst_error_hours, 'h')\n return df\n\n\nclass ProgressBar:\n def __init__(self, iterations, text='completed'):\n self.text = text\n self.iterations = iterations\n self.prog_bar = '[]'\n self.fill_char = '*'\n self.width = 50\n self.__update_amount(0)\n self.elapsed = 1\n\n def completed(self):\n if self.elapsed > self.iterations:\n self.elapsed = self.iterations\n self.update_iteration(1)\n print('\\r' + str(self), end='')\n _sys.stdout.flush()\n print()\n\n def animate(self, iteration=None):\n if iteration is None:\n self.elapsed += 1\n iteration = self.elapsed\n else:\n self.elapsed += iteration\n\n print('\\r' + str(self), end='')\n _sys.stdout.flush()\n self.update_iteration()\n\n def update_iteration(self, val=None):\n val = val if val is not None else self.elapsed / float(self.iterations)\n self.__update_amount(val * 100.0)\n self.prog_bar += ' %s of %s %s' % (\n self.elapsed, self.iterations, self.text)\n\n def __update_amount(self, new_amount):\n percent_done = int(round((new_amount / 100.0) * 100.0))\n all_full = self.width - 2\n num_hashes = int(round((percent_done / 100.0) * all_full))\n self.prog_bar = '[' + self.fill_char * \\\n num_hashes + ' ' * (all_full - num_hashes) + ']'\n pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))\n pct_string = '%d%%' % percent_done\n self.prog_bar = self.prog_bar[0:pct_place] + \\\n (pct_string + self.prog_bar[pct_place + len(pct_string):])\n\n def __str__(self):\n return str(self.prog_bar)\n\n\n# Simple file cache of ticker->timezone:\n_cache_dp = None\ndef get_cache_dirpath():\n if _cache_dp is None:\n dp = _os.path.join(_ad.user_cache_dir(), \"py-yfinance\")\n else:\n dp = _os.path.join(_cache_dp, \"py-yfinance\")\n return dp\ndef set_tz_cache_location(dp):\n global _cache_dp\n _cache_dp = dp\n\ndef cache_lookup_tkr_tz(tkr):\n fp = _os.path.join(get_cache_dirpath(), \"tkr-tz.csv\")\n if not _os.path.isfile(fp):\n return None\n\n mutex.acquire()\n df = _pd.read_csv(fp, index_col=\"Ticker\", on_bad_lines=\"skip\")\n mutex.release()\n if tkr in df.index:\n return df.loc[tkr,\"Tz\"]\n else:\n return None\ndef cache_store_tkr_tz(tkr,tz):\n\n dp = get_cache_dirpath()\n fp = _os.path.join(dp, \"tkr-tz.csv\")\n mutex.acquire()\n if not _os.path.isdir(dp):\n _os.makedirs(dp)\n if (not _os.path.isfile(fp)) and (tz is not None):\n df = _pd.DataFrame({\"Tz\":[tz]}, index=[tkr])\n df.index.name = \"Ticker\"\n df.to_csv(fp)\n\n else:\n df = _pd.read_csv(fp, index_col=\"Ticker\", 
on_bad_lines=\"skip\")\n if tz is None:\n # Delete if in cache:\n if tkr in df.index:\n df.drop(tkr).to_csv(fp)\n else:\n if tkr in df.index:\n raise Exception(\"Tkr {} tz already in cache\".format(tkr))\n df.loc[tkr,\"Tz\"] = tz\n df.to_csv(fp)\n \n mutex.release()\n\n",
"path": "yfinance/utils.py"
}
] | [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# yfinance - market data downloader\n# https://github.com/ranaroussi/yfinance\n#\n# Copyright 2017-2019 Ran Aroussi\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom __future__ import print_function\n\nimport datetime as _datetime\nimport pytz as _tz\nimport requests as _requests\nimport re as _re\nimport pandas as _pd\nimport numpy as _np\nimport sys as _sys\nimport os as _os\nimport appdirs as _ad\n\nfrom base64 import b64decode\nimport hashlib\nusePycryptodome = False # slightly faster\n# usePycryptodome = True\nif usePycryptodome:\n # NOTE: if decide to use 'pycryptodome', set min version to 3.6.6\n from Crypto.Cipher import AES\n from Crypto.Util.Padding import unpad\nelse:\n from cryptography.hazmat.primitives import padding\n from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\n\nfrom threading import Lock\nmutex = Lock()\n\ntry:\n import ujson as _json\nexcept ImportError:\n import json as _json\n\n\nuser_agent_headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'}\n\n\ndef is_isin(string):\n return bool(_re.match(\"^([A-Z]{2})([A-Z0-9]{9})([0-9]{1})$\", string))\n\n\ndef get_all_by_isin(isin, proxy=None, session=None):\n if not(is_isin(isin)):\n raise ValueError(\"Invalid ISIN number\")\n\n from .base import _BASE_URL_\n session = session or _requests\n url = \"{}/v1/finance/search?q={}\".format(_BASE_URL_, isin)\n data = session.get(url=url, proxies=proxy, headers=user_agent_headers)\n try:\n data = data.json()\n ticker = data.get('quotes', [{}])[0]\n return {\n 'ticker': {\n 'symbol': ticker['symbol'],\n 'shortname': ticker['shortname'],\n 'longname': ticker['longname'],\n 'type': ticker['quoteType'],\n 'exchange': ticker['exchDisp'],\n },\n 'news': data.get('news', [])\n }\n except Exception:\n return {}\n\n\ndef get_ticker_by_isin(isin, proxy=None, session=None):\n data = get_all_by_isin(isin, proxy, session)\n return data.get('ticker', {}).get('symbol', '')\n\n\ndef get_info_by_isin(isin, proxy=None, session=None):\n data = get_all_by_isin(isin, proxy, session)\n return data.get('ticker', {})\n\n\ndef get_news_by_isin(isin, proxy=None, session=None):\n data = get_all_by_isin(isin, proxy, session)\n return data.get('news', {})\n\n\ndef empty_df(index=[]):\n empty = _pd.DataFrame(index=index, data={\n 'Open': _np.nan, 'High': _np.nan, 'Low': _np.nan,\n 'Close': _np.nan, 'Adj Close': _np.nan, 'Volume': _np.nan})\n empty.index.name = 'Date'\n return empty\n\n\ndef empty_earnings_dates_df():\n empty = _pd.DataFrame(\n columns=[\"Symbol\", \"Company\", \"Earnings Date\",\n \"EPS Estimate\", \"Reported EPS\", \"Surprise(%)\"])\n return empty\n\n\ndef get_html(url, proxy=None, session=None):\n session = session or _requests\n html = session.get(url=url, proxies=proxy, headers=user_agent_headers).text\n return html\n\n\n\ndef decrypt_cryptojs_stores(data):\n \"\"\"\n Yahoo has started 
encrypting data stores, this method decrypts it.\n :param data: Python dict of the json data\n :return: The decrypted string data in data['context']['dispatcher']['stores']\n \"\"\"\n\n _cs = data[\"_cs\"]\n # Assumes _cr has format like: '{\"words\":[-449732894,601032952,157396918,2056341829],\"sigBytes\":16}';\n _cr = _json.loads(data[\"_cr\"])\n _cr = b\"\".join(int.to_bytes(i, length=4, byteorder=\"big\", signed=True) for i in _cr[\"words\"])\n\n password = hashlib.pbkdf2_hmac(\"sha1\", _cs.encode(\"utf8\"), _cr, 1, dklen=32).hex()\n\n encrypted_stores = data['context']['dispatcher']['stores']\n encrypted_stores = b64decode(encrypted_stores)\n assert encrypted_stores[0:8] == b\"Salted__\"\n salt = encrypted_stores[8:16]\n encrypted_stores = encrypted_stores[16:]\n\n key, iv = _EVPKDF(password, salt, keySize=32, ivSize=16, iterations=1, hashAlgorithm=\"md5\")\n\n if usePycryptodome:\n cipher = AES.new(key, AES.MODE_CBC, iv=iv)\n plaintext = cipher.decrypt(encrypted_stores)\n plaintext = unpad(plaintext, 16, style=\"pkcs7\")\n else:\n cipher = Cipher(algorithms.AES(key), modes.CBC(iv))\n decryptor = cipher.decryptor()\n plaintext = decryptor.update(encrypted_stores) + decryptor.finalize()\n unpadder = padding.PKCS7(128).unpadder()\n plaintext = unpadder.update(plaintext) + unpadder.finalize()\n plaintext = plaintext.decode(\"utf-8\")\n\n return plaintext\n\ndef _EVPKDF(password, salt, keySize=32, ivSize=16, iterations=1, hashAlgorithm=\"md5\") -> tuple:\n \"\"\"OpenSSL EVP Key Derivation Function\n Args:\n password (Union[str, bytes, bytearray]): Password to generate key from.\n salt (Union[bytes, bytearray]): Salt to use.\n keySize (int, optional): Output key length in bytes. Defaults to 32.\n ivSize (int, optional): Output Initialization Vector (IV) length in bytes. Defaults to 16.\n iterations (int, optional): Number of iterations to perform. Defaults to 1.\n hashAlgorithm (str, optional): Hash algorithm to use for the KDF. Defaults to 'md5'.\n Returns:\n key, iv: Derived key and Initialization Vector (IV) bytes.\n\n Taken from: https://gist.github.com/rafiibrahim8/0cd0f8c46896cafef6486cb1a50a16d3\n OpenSSL original code: https://github.com/openssl/openssl/blob/master/crypto/evp/evp_key.c#L78\n \"\"\"\n\n assert iterations > 0, \"Iterations can not be less than 1.\"\n\n if isinstance(password, str):\n password = password.encode(\"utf-8\")\n\n final_length = keySize + ivSize\n key_iv = b\"\"\n block = None\n\n while len(key_iv) < final_length:\n hasher = hashlib.new(hashAlgorithm)\n if block:\n hasher.update(block)\n hasher.update(password)\n hasher.update(salt)\n block = hasher.digest()\n for _ in range(1, iterations):\n block = hashlib.new(hashAlgorithm, block).digest()\n key_iv += block\n\n key, iv = key_iv[:keySize], key_iv[keySize:final_length]\n return key, iv\n\n\ndef get_json(url, proxy=None, session=None):\n session = session or _requests\n html = session.get(url=url, proxies=proxy, headers=user_agent_headers).text\n\n if not \"root.App.main =\" in html:\n return {}\n\n json_str = html.split('root.App.main =')[1].split(\n '(this)')[0].split(';\\n}')[0].strip()\n data = _json.loads(json_str)\n\n if \"_cs\" in data and \"_cr\" in data:\n data_stores = _json.loads(decrypt_cryptojs_stores(data))\n else:\n if \"context\" in data and \"dispatcher\" in data[\"context\"]:\n # Keep old code, just in case\n data_stores = data['context']['dispatcher']['stores']\n else:\n data_stores = data\n\n if not 'QuoteSummaryStore' in data_stores:\n # Problem in data. 
Either delisted, or Yahoo spam triggered\n return {}\n\n data = data_stores['QuoteSummaryStore']\n # add data about Shares Outstanding for companies' tickers if they are available\n try:\n data['annualBasicAverageShares'] = \\\n data_stores['QuoteTimeSeriesStore']['timeSeries']['annualBasicAverageShares']\n except Exception:\n pass\n\n # return data\n new_data = _json.dumps(data).replace('{}', 'null')\n new_data = _re.sub(\n r'\\{[\\'|\\\"]raw[\\'|\\\"]:(.*?),(.*?)\\}', r'\\1', new_data)\n\n return _json.loads(new_data)\n\n\ndef camel2title(o):\n return [_re.sub(\"([a-z])([A-Z])\", r\"\\g<1> \\g<2>\", i).title() for i in o]\n\n\ndef _parse_user_dt(dt, exchange_tz):\n if isinstance(dt, int):\n ## Should already be epoch, test with conversion:\n _datetime.datetime.fromtimestamp(dt)\n else:\n # Convert str/date -> datetime, set tzinfo=exchange, get timestamp:\n if isinstance(dt, str):\n dt = _datetime.datetime.strptime(str(dt), '%Y-%m-%d')\n if isinstance(dt, _datetime.date) and not isinstance(dt, _datetime.datetime):\n dt = _datetime.datetime.combine(dt, _datetime.time(0))\n if isinstance(dt, _datetime.datetime) and dt.tzinfo is None:\n # Assume user is referring to exchange's timezone\n dt = _tz.timezone(exchange_tz).localize(dt)\n dt = int(dt.timestamp())\n return dt\n\n\ndef auto_adjust(data):\n df = data.copy()\n ratio = df[\"Close\"] / df[\"Adj Close\"]\n df[\"Adj Open\"] = df[\"Open\"] / ratio\n df[\"Adj High\"] = df[\"High\"] / ratio\n df[\"Adj Low\"] = df[\"Low\"] / ratio\n\n df.drop(\n [\"Open\", \"High\", \"Low\", \"Close\"],\n axis=1, inplace=True)\n\n df.rename(columns={\n \"Adj Open\": \"Open\", \"Adj High\": \"High\",\n \"Adj Low\": \"Low\", \"Adj Close\": \"Close\"\n }, inplace=True)\n\n df = df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n return df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n\n\ndef back_adjust(data):\n \"\"\" back-adjusted data to mimic true historical prices \"\"\"\n\n df = data.copy()\n ratio = df[\"Adj Close\"] / df[\"Close\"]\n df[\"Adj Open\"] = df[\"Open\"] * ratio\n df[\"Adj High\"] = df[\"High\"] * ratio\n df[\"Adj Low\"] = df[\"Low\"] * ratio\n\n df.drop(\n [\"Open\", \"High\", \"Low\", \"Adj Close\"],\n axis=1, inplace=True)\n\n df.rename(columns={\n \"Adj Open\": \"Open\", \"Adj High\": \"High\",\n \"Adj Low\": \"Low\"\n }, inplace=True)\n\n return df[[\"Open\", \"High\", \"Low\", \"Close\", \"Volume\"]]\n\n\ndef parse_quotes(data):\n timestamps = data[\"timestamp\"]\n ohlc = data[\"indicators\"][\"quote\"][0]\n volumes = ohlc[\"volume\"]\n opens = ohlc[\"open\"]\n closes = ohlc[\"close\"]\n lows = ohlc[\"low\"]\n highs = ohlc[\"high\"]\n\n adjclose = closes\n if \"adjclose\" in data[\"indicators\"]:\n adjclose = data[\"indicators\"][\"adjclose\"][0][\"adjclose\"]\n\n quotes = _pd.DataFrame({\"Open\": opens,\n \"High\": highs,\n \"Low\": lows,\n \"Close\": closes,\n \"Adj Close\": adjclose,\n \"Volume\": volumes})\n\n quotes.index = _pd.to_datetime(timestamps, unit=\"s\")\n quotes.sort_index(inplace=True)\n\n return quotes\n\n\ndef parse_actions(data):\n dividends = _pd.DataFrame(\n columns=[\"Dividends\"], index=_pd.DatetimeIndex([]))\n splits = _pd.DataFrame(\n columns=[\"Stock Splits\"], index=_pd.DatetimeIndex([]))\n\n if \"events\" in data:\n if \"dividends\" in data[\"events\"]:\n dividends = _pd.DataFrame(\n data=list(data[\"events\"][\"dividends\"].values()))\n dividends.set_index(\"date\", inplace=True)\n dividends.index = _pd.to_datetime(dividends.index, unit=\"s\")\n dividends.sort_index(inplace=True)\n\n 
dividends.columns = [\"Dividends\"]\n\n if \"splits\" in data[\"events\"]:\n splits = _pd.DataFrame(\n data=list(data[\"events\"][\"splits\"].values()))\n splits.set_index(\"date\", inplace=True)\n splits.index = _pd.to_datetime(splits.index, unit=\"s\")\n splits.sort_index(inplace=True)\n splits[\"Stock Splits\"] = splits[\"numerator\"] / \\\n splits[\"denominator\"]\n splits = splits[\"Stock Splits\"]\n\n return dividends, splits\n\n\ndef fix_Yahoo_dst_issue(df, interval):\n if interval in [\"1d\",\"1w\",\"1wk\"]:\n # These intervals should start at time 00:00. But for some combinations of date and timezone, \n # Yahoo has time off by few hours (e.g. Brazil 23:00 around Jan-2022). Suspect DST problem.\n # The clue is (a) minutes=0 and (b) hour near 0. \n # Obviously Yahoo meant 00:00, so ensure this doesn't affect date conversion:\n f_pre_midnight = (df.index.minute == 0) & (df.index.hour.isin([22,23]))\n dst_error_hours = _np.array([0]*df.shape[0])\n dst_error_hours[f_pre_midnight] = 24-df.index[f_pre_midnight].hour\n df.index += _pd.TimedeltaIndex(dst_error_hours, 'h')\n return df\n\n\nclass ProgressBar:\n def __init__(self, iterations, text='completed'):\n self.text = text\n self.iterations = iterations\n self.prog_bar = '[]'\n self.fill_char = '*'\n self.width = 50\n self.__update_amount(0)\n self.elapsed = 1\n\n def completed(self):\n if self.elapsed > self.iterations:\n self.elapsed = self.iterations\n self.update_iteration(1)\n print('\\r' + str(self), end='')\n _sys.stdout.flush()\n print()\n\n def animate(self, iteration=None):\n if iteration is None:\n self.elapsed += 1\n iteration = self.elapsed\n else:\n self.elapsed += iteration\n\n print('\\r' + str(self), end='')\n _sys.stdout.flush()\n self.update_iteration()\n\n def update_iteration(self, val=None):\n val = val if val is not None else self.elapsed / float(self.iterations)\n self.__update_amount(val * 100.0)\n self.prog_bar += ' %s of %s %s' % (\n self.elapsed, self.iterations, self.text)\n\n def __update_amount(self, new_amount):\n percent_done = int(round((new_amount / 100.0) * 100.0))\n all_full = self.width - 2\n num_hashes = int(round((percent_done / 100.0) * all_full))\n self.prog_bar = '[' + self.fill_char * \\\n num_hashes + ' ' * (all_full - num_hashes) + ']'\n pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))\n pct_string = '%d%%' % percent_done\n self.prog_bar = self.prog_bar[0:pct_place] + \\\n (pct_string + self.prog_bar[pct_place + len(pct_string):])\n\n def __str__(self):\n return str(self.prog_bar)\n\n\n# Simple file cache of ticker->timezone:\n_cache_dp = None\ndef get_cache_dirpath():\n if _cache_dp is None:\n dp = _os.path.join(_ad.user_cache_dir(), \"py-yfinance\")\n else:\n dp = _os.path.join(_cache_dp, \"py-yfinance\")\n return dp\ndef set_tz_cache_location(dp):\n global _cache_dp\n _cache_dp = dp\n\ndef cache_lookup_tkr_tz(tkr):\n fp = _os.path.join(get_cache_dirpath(), \"tkr-tz.csv\")\n if not _os.path.isfile(fp):\n return None\n\n mutex.acquire()\n df = _pd.read_csv(fp, index_col=\"Ticker\", on_bad_lines=\"skip\")\n mutex.release()\n if tkr in df.index:\n return df.loc[tkr,\"Tz\"]\n else:\n return None\ndef cache_store_tkr_tz(tkr,tz):\n\n dp = get_cache_dirpath()\n fp = _os.path.join(dp, \"tkr-tz.csv\")\n mutex.acquire()\n if not _os.path.isdir(dp):\n _os.makedirs(dp)\n if (not _os.path.isfile(fp)) and (tz is not None):\n df = _pd.DataFrame({\"Tz\":[tz]}, index=[tkr])\n df.index.name = \"Ticker\"\n df.to_csv(fp)\n\n else:\n df = _pd.read_csv(fp, index_col=\"Ticker\", 
on_bad_lines=\"skip\")\n if tz is None:\n # Delete if in cache:\n if tkr in df.index:\n df.drop(tkr).to_csv(fp)\n else:\n if tkr in df.index:\n raise Exception(\"Tkr {} tz already in cache\".format(tkr))\n df.loc[tkr,\"Tz\"] = tz\n df.to_csv(fp)\n \n mutex.release()\n\n",
"path": "yfinance/utils.py"
}
] | diff --git a/test_yfinance.py b/test_yfinance.py
index 69ce9d7dc..abc87ecaa 100644
--- a/test_yfinance.py
+++ b/test_yfinance.py
@@ -20,8 +20,15 @@
session = None
import requests_cache ; session = requests_cache.CachedSession("yfinance.cache", expire_after=24*60*60)
-symbols = ['MSFT', 'IWO', 'VFINX', '^GSPC', 'BTC-USD']
-tickers = [yf.Ticker(symbol, session=session) for symbol in symbols]
+# Good symbols = all attributes should work
+good_symbols = ['MSFT', 'IWO', 'VFINX', '^GSPC', 'BTC-USD']
+good_tickers = [yf.Ticker(symbol, session=session) for symbol in good_symbols]
+# Dodgy symbols = Yahoo data incomplete, so exclude from some tests
+dodgy_symbols = ["G7W.DU"]
+dodgy_tickers = [yf.Ticker(symbol, session=session) for symbol in dodgy_symbols]
+symbols = good_symbols + dodgy_symbols
+tickers = good_tickers + dodgy_tickers
+# Delisted = no data expected but yfinance shouldn't raise exception
delisted_symbols = ["BRK.B", "SDLP"]
delisted_tickers = [yf.Ticker(symbol, session=session) for symbol in delisted_symbols]
@@ -118,8 +125,7 @@ def test_attributes_nofail(self):
ticker.earnings_dates
def test_holders(self):
- for ticker in tickers:
- assert(ticker.info is not None and ticker.info != {})
+ for ticker in good_tickers:
assert(ticker.major_holders is not None)
assert(ticker.institutional_holders is not None)
diff --git a/yfinance/utils.py b/yfinance/utils.py
index 75ab8b645..ac02ab33a 100644
--- a/yfinance/utils.py
+++ b/yfinance/utils.py
@@ -202,6 +202,9 @@ def get_json(url, proxy=None, session=None):
session = session or _requests
html = session.get(url=url, proxies=proxy, headers=user_agent_headers).text
+ if not "root.App.main =" in html:
+ return {}
+
json_str = html.split('root.App.main =')[1].split(
'(this)')[0].split(';\n}')[0].strip()
data = _json.loads(json_str)
| utils.py: list index out of range
There is a strange behaviour with yfinance 0.1.94 when I try to read ticker "G7W.DU":
Sometimes it works, and sometimes utils.py raises a "list index out of range" error.
What I expect (and what sometimes works):
```
$ python
Python 3.10.9 (main, Dec 11 2022, 14:50:46) [GCC 11.3.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import yfinance as yf
>>> t = "G7W.DU"
>>> ticker = yf.Ticker(t)
>>> ticker.info["regularMarketPrice"]
97
```
What I often get:
```
$ python
Python 3.10.9 (main, Dec 11 2022, 14:50:46) [GCC 11.3.0] on linux
Type "help", "copyright", "credits" or "license" for more information.
>>> import yfinance as yf
>>> t = "G7W.DU"
>>> ticker = yf.Ticker(t)
>>> ticker.info["regularMarketPrice"]
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/home/foo/.local/lib/python3.10/site-packages/yfinance/ticker.py", line 147, in info
return self.get_info()
File "/home/foo/.local/lib/python3.10/site-packages/yfinance/base.py", line 742, in get_info
self._get_info(proxy)
File "/home/foo/.local/lib/python3.10/site-packages/yfinance/base.py", line 424, in _get_info
data = utils.get_json(ticker_url, proxy, self.session)
File "/home/foo/.local/lib/python3.10/site-packages/yfinance/utils.py", line 205, in get_json
json_str = html.split('root.App.main =')[1].split(
IndexError: list index out of range
```
There seems to be something special about G7W.DU, because it is the only ticker where I get the error. I have tried 5 tickers so far and only that one triggers it.
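For illustration, a minimal sketch of the guard that the merged diff adds to `get_json()` before splitting on `root.App.main =` (the standalone helper name `_extract_stores_json` is hypothetical; only the early-return check mirrors the patch):
```
def _extract_stores_json(html):
    # Yahoo sometimes serves a page without the embedded data stores; in that
    # case html.split('root.App.main =')[1] raises IndexError, so bail out
    # early and let the caller treat the result as "no data".
    if "root.App.main =" not in html:
        return "{}"
    return (
        html.split("root.App.main =")[1]
        .split("(this)")[0]
        .split(";\n}")[0]
        .strip()
    )
```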
|
kubeflow__pipelines-4118 | [
{
"content": "#!/usr/bin/env python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Iris flowers example using TFX. Based on https://github.com/tensorflow/tfx/blob/master/tfx/examples/iris/iris_pipeline_native_keras.py\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport kfp\nfrom typing import Text\n\nimport absl\nimport tensorflow_model_analysis as tfma\n\nfrom tfx.components import CsvExampleGen\nfrom tfx.components import Evaluator\nfrom tfx.components import ExampleValidator\nfrom tfx.components import Pusher\nfrom tfx.components import ResolverNode\nfrom tfx.components import SchemaGen\nfrom tfx.components import StatisticsGen\nfrom tfx.components import Trainer\nfrom tfx.components import Transform\nfrom tfx.components.base import executor_spec\nfrom tfx.components.trainer.executor import GenericExecutor\nfrom tfx.dsl.experimental import latest_blessed_model_resolver\nfrom tfx.orchestration import data_types\nfrom tfx.orchestration import pipeline\nfrom tfx.orchestration.kubeflow import kubeflow_dag_runner\nfrom tfx.proto import trainer_pb2\nfrom tfx.proto import pusher_pb2\nfrom tfx.types import Channel\nfrom tfx.types.standard_artifacts import Model\nfrom tfx.types.standard_artifacts import ModelBlessing\nfrom tfx.utils.dsl_utils import external_input\n\n_pipeline_name = 'iris_native_keras'\n\n# This example assumes that Iris flowers data is stored in GCS and the\n# utility function is in iris_utils.py. Feel free to customize as needed.\n_data_root_param = data_types.RuntimeParameter(\n name='data-root',\n default='gs://ml-pipeline/sample-data/iris/data',\n ptype=Text,\n)\n\n# Python module file to inject customized logic into the TFX components. The\n# Transform and Trainer both require user-defined functions to run successfully.\n# This file is fork from https://github.com/tensorflow/tfx/blob/master/tfx/examples/iris/iris_utils_native_keras.py\n# and baked into the TFX image used in the pipeline.\n_module_file_param = data_types.RuntimeParameter(\n name='module-file',\n default=\n '/tfx-src/tfx/examples/iris/iris_utils_native_keras.py',\n ptype=Text,\n)\n\n# Directory and data locations. This example assumes all of the flowers\n# example code and metadata library is relative to a GCS path.\n# Note: if one deployed KFP from GKE marketplace, it's possible to leverage\n# the following magic placeholder to auto-populate the default GCS bucket\n# associated with KFP deployment. 
Otherwise you'll need to replace it with your\n# actual bucket name here or when creating a run.\n_pipeline_root = os.path.join(\n 'gs://{{kfp-default-bucket}}', 'tfx_iris', kfp.dsl.RUN_ID_PLACEHOLDER\n)\n\n\ndef _create_pipeline(\n pipeline_name: Text, pipeline_root: Text\n) -> pipeline.Pipeline:\n \"\"\"Implements the Iris flowers pipeline with TFX.\"\"\"\n examples = external_input(_data_root_param)\n\n # Brings data into the pipeline or otherwise joins/converts training data.\n example_gen = CsvExampleGen(input=examples)\n\n # Computes statistics over data for visualization and example validation.\n statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])\n\n # Generates schema based on statistics files.\n infer_schema = SchemaGen(\n statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True\n )\n\n # Performs anomaly detection based on statistics and data schema.\n validate_stats = ExampleValidator(\n statistics=statistics_gen.outputs['statistics'],\n schema=infer_schema.outputs['schema']\n )\n\n # Performs transformations and feature engineering in training and serving.\n transform = Transform(\n examples=example_gen.outputs['examples'],\n schema=infer_schema.outputs['schema'],\n module_file=_module_file_param\n )\n\n # Uses user-provided Python function that implements a model using Keras.\n trainer = Trainer(\n module_file=_module_file_param,\n custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),\n examples=transform.outputs['transformed_examples'],\n transform_graph=transform.outputs['transform_graph'],\n schema=infer_schema.outputs['schema'],\n train_args=trainer_pb2.TrainArgs(num_steps=100),\n eval_args=trainer_pb2.EvalArgs(num_steps=50)\n )\n\n # Get the latest blessed model for model validation.\n model_resolver = ResolverNode(\n instance_name='latest_blessed_model_resolver',\n resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,\n model=Channel(type=Model),\n model_blessing=Channel(type=ModelBlessing)\n )\n\n # Uses TFMA to compute an evaluation statistics over features of a model and\n # perform quality validation of a candidate model (compared to a baseline).\n # Note: to compile this successfully you'll need TFMA at >= 0.21.5\n eval_config = tfma.EvalConfig(\n model_specs=[\n tfma.ModelSpec(name='candidate', label_key='variety'),\n tfma.ModelSpec(\n name='baseline', label_key='variety', is_baseline=True\n )\n ],\n slicing_specs=[\n tfma.SlicingSpec(),\n # Data can be sliced along a feature column. 
Required by TFMA visualization.\n tfma.SlicingSpec(feature_keys=['sepal_length'])],\n metrics_specs=[\n tfma.MetricsSpec(\n metrics=[\n tfma.MetricConfig(\n class_name='SparseCategoricalAccuracy',\n threshold=tfma.config.MetricThreshold(\n value_threshold=tfma.GenericValueThreshold(\n lower_bound={'value': 0.9}\n ),\n change_threshold=tfma.GenericChangeThreshold(\n direction=tfma.MetricDirection.HIGHER_IS_BETTER,\n absolute={'value': -1e-10}\n )\n )\n )\n ]\n )\n ]\n )\n\n # Uses TFMA to compute a evaluation statistics over features of a model.\n model_analyzer = Evaluator(\n examples=example_gen.outputs['examples'],\n model=trainer.outputs['model'],\n baseline_model=model_resolver.outputs['model'],\n # Change threshold will be ignored if there is no baseline (first run).\n eval_config=eval_config\n )\n\n # Checks whether the model passed the validation steps and pushes the model\n # to a file destination if check passed.\n pusher = Pusher(\n model=trainer.outputs['model'],\n model_blessing=model_analyzer.outputs['blessing'],\n push_destination=pusher_pb2.PushDestination(\n filesystem=pusher_pb2.PushDestination.Filesystem(\n base_directory=os.path.\n join(str(pipeline.ROOT_PARAMETER), 'model_serving')\n )\n )\n )\n\n return pipeline.Pipeline(\n pipeline_name=pipeline_name,\n pipeline_root=pipeline_root,\n components=[\n example_gen, statistics_gen, infer_schema, validate_stats, transform,\n trainer, model_resolver, model_analyzer, pusher\n ],\n enable_cache=True,\n )\n\n\nif __name__ == '__main__':\n absl.logging.set_verbosity(absl.logging.INFO)\n # Make sure the version of TFX image used is consistent with the version of\n # TFX SDK. Here we use tfx:0.22.0 image.\n config = kubeflow_dag_runner.KubeflowDagRunnerConfig(\n kubeflow_metadata_config=kubeflow_dag_runner.\n get_default_kubeflow_metadata_config(),\n tfx_image='gcr.io/tfx-oss-public/tfx:0.22.0',\n )\n kfp_runner = kubeflow_dag_runner.KubeflowDagRunner(\n output_filename=__file__ + '.yaml', config=config\n )\n kfp_runner.run(\n _create_pipeline(\n pipeline_name=_pipeline_name, pipeline_root=_pipeline_root\n )\n )\n",
"path": "samples/core/iris/iris.py"
}
] | [
{
"content": "#!/usr/bin/env python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Iris flowers example using TFX. Based on https://github.com/tensorflow/tfx/blob/master/tfx/examples/iris/iris_pipeline_native_keras.py\"\"\"\n\nimport os\nimport kfp\nfrom typing import Text\n\nimport absl\nimport tensorflow_model_analysis as tfma\n\nfrom tfx.components import CsvExampleGen\nfrom tfx.components import Evaluator\nfrom tfx.components import ExampleValidator\nfrom tfx.components import Pusher\nfrom tfx.components import ResolverNode\nfrom tfx.components import SchemaGen\nfrom tfx.components import StatisticsGen\nfrom tfx.components import Trainer\nfrom tfx.components import Transform\nfrom tfx.components.base import executor_spec\nfrom tfx.components.trainer.executor import GenericExecutor\nfrom tfx.dsl.experimental import latest_blessed_model_resolver\nfrom tfx.orchestration import data_types\nfrom tfx.orchestration import pipeline\nfrom tfx.orchestration.kubeflow import kubeflow_dag_runner\nfrom tfx.proto import trainer_pb2\nfrom tfx.proto import pusher_pb2\nfrom tfx.types import Channel\nfrom tfx.types.standard_artifacts import Model\nfrom tfx.types.standard_artifacts import ModelBlessing\nfrom tfx.utils.dsl_utils import external_input\n\n_pipeline_name = 'iris_native_keras'\n\n# This example assumes that Iris flowers data is stored in GCS and the\n# utility function is in iris_utils.py. Feel free to customize as needed.\n_data_root_param = data_types.RuntimeParameter(\n name='data-root',\n default='gs://ml-pipeline/sample-data/iris/data',\n ptype=Text,\n)\n\n# Python module file to inject customized logic into the TFX components. The\n# Transform and Trainer both require user-defined functions to run successfully.\n# This file is fork from https://github.com/tensorflow/tfx/blob/master/tfx/examples/iris/iris_utils_native_keras.py\n# and baked into the TFX image used in the pipeline.\n_module_file_param = data_types.RuntimeParameter(\n name='module-file',\n default=\n '/tfx-src/tfx/examples/iris/iris_utils_native_keras.py',\n ptype=Text,\n)\n\n# Directory and data locations. This example assumes all of the flowers\n# example code and metadata library is relative to a GCS path.\n# Note: if one deployed KFP from GKE marketplace, it's possible to leverage\n# the following magic placeholder to auto-populate the default GCS bucket\n# associated with KFP deployment. 
Otherwise you'll need to replace it with your\n# actual bucket name here or when creating a run.\n_pipeline_root = os.path.join(\n 'gs://{{kfp-default-bucket}}', 'tfx_iris', kfp.dsl.RUN_ID_PLACEHOLDER\n)\n\n\ndef _create_pipeline(\n pipeline_name: Text, pipeline_root: Text\n) -> pipeline.Pipeline:\n \"\"\"Implements the Iris flowers pipeline with TFX.\"\"\"\n examples = external_input(_data_root_param)\n\n # Brings data into the pipeline or otherwise joins/converts training data.\n example_gen = CsvExampleGen(input=examples)\n\n # Computes statistics over data for visualization and example validation.\n statistics_gen = StatisticsGen(examples=example_gen.outputs['examples'])\n\n # Generates schema based on statistics files.\n infer_schema = SchemaGen(\n statistics=statistics_gen.outputs['statistics'], infer_feature_shape=True\n )\n\n # Performs anomaly detection based on statistics and data schema.\n validate_stats = ExampleValidator(\n statistics=statistics_gen.outputs['statistics'],\n schema=infer_schema.outputs['schema']\n )\n\n # Performs transformations and feature engineering in training and serving.\n transform = Transform(\n examples=example_gen.outputs['examples'],\n schema=infer_schema.outputs['schema'],\n module_file=_module_file_param\n )\n\n # Uses user-provided Python function that implements a model using Keras.\n trainer = Trainer(\n module_file=_module_file_param,\n custom_executor_spec=executor_spec.ExecutorClassSpec(GenericExecutor),\n examples=transform.outputs['transformed_examples'],\n transform_graph=transform.outputs['transform_graph'],\n schema=infer_schema.outputs['schema'],\n train_args=trainer_pb2.TrainArgs(num_steps=100),\n eval_args=trainer_pb2.EvalArgs(num_steps=50)\n )\n\n # Get the latest blessed model for model validation.\n model_resolver = ResolverNode(\n instance_name='latest_blessed_model_resolver',\n resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,\n model=Channel(type=Model),\n model_blessing=Channel(type=ModelBlessing)\n )\n\n # Uses TFMA to compute an evaluation statistics over features of a model and\n # perform quality validation of a candidate model (compared to a baseline).\n # Note: to compile this successfully you'll need TFMA at >= 0.21.5\n eval_config = tfma.EvalConfig(\n model_specs=[\n tfma.ModelSpec(name='candidate', label_key='variety'),\n tfma.ModelSpec(\n name='baseline', label_key='variety', is_baseline=True\n )\n ],\n slicing_specs=[\n tfma.SlicingSpec(),\n # Data can be sliced along a feature column. 
Required by TFMA visualization.\n tfma.SlicingSpec(feature_keys=['sepal_length'])],\n metrics_specs=[\n tfma.MetricsSpec(\n metrics=[\n tfma.MetricConfig(\n class_name='SparseCategoricalAccuracy',\n threshold=tfma.config.MetricThreshold(\n value_threshold=tfma.GenericValueThreshold(\n lower_bound={'value': 0.9}\n ),\n change_threshold=tfma.GenericChangeThreshold(\n direction=tfma.MetricDirection.HIGHER_IS_BETTER,\n absolute={'value': -1e-10}\n )\n )\n )\n ]\n )\n ]\n )\n\n # Uses TFMA to compute a evaluation statistics over features of a model.\n model_analyzer = Evaluator(\n examples=example_gen.outputs['examples'],\n model=trainer.outputs['model'],\n baseline_model=model_resolver.outputs['model'],\n # Change threshold will be ignored if there is no baseline (first run).\n eval_config=eval_config\n )\n\n # Checks whether the model passed the validation steps and pushes the model\n # to a file destination if check passed.\n pusher = Pusher(\n model=trainer.outputs['model'],\n model_blessing=model_analyzer.outputs['blessing'],\n push_destination=pusher_pb2.PushDestination(\n filesystem=pusher_pb2.PushDestination.Filesystem(\n base_directory=os.path.\n join(str(pipeline.ROOT_PARAMETER), 'model_serving')\n )\n )\n )\n\n return pipeline.Pipeline(\n pipeline_name=pipeline_name,\n pipeline_root=pipeline_root,\n components=[\n example_gen, statistics_gen, infer_schema, validate_stats, transform,\n trainer, model_resolver, model_analyzer, pusher\n ],\n enable_cache=True,\n )\n\n\nif __name__ == '__main__':\n absl.logging.set_verbosity(absl.logging.INFO)\n # Make sure the version of TFX image used is consistent with the version of\n # TFX SDK. Here we use tfx:0.21.2 image.\n config = kubeflow_dag_runner.KubeflowDagRunnerConfig(\n kubeflow_metadata_config=kubeflow_dag_runner.\n get_default_kubeflow_metadata_config(),\n tfx_image='gcr.io/tfx-oss-public/tfx:0.21.2',\n )\n kfp_runner = kubeflow_dag_runner.KubeflowDagRunner(\n output_filename=__file__ + '.yaml', config=config\n )\n kfp_runner.run(\n _create_pipeline(\n pipeline_name=_pipeline_name, pipeline_root=_pipeline_root\n )\n )\n",
"path": "samples/core/iris/iris.py"
}
] | diff --git a/backend/Dockerfile b/backend/Dockerfile
index 747e3698658..499946066bc 100644
--- a/backend/Dockerfile
+++ b/backend/Dockerfile
@@ -26,7 +26,7 @@ RUN if [ "$use_remote_build" = "true" ]; then \
# Compile
FROM python:3.5 as compiler
RUN apt-get update -y && \
- apt-get install --no-install-recommends -y -q default-jdk python3-setuptools python3-dev
+ apt-get install --no-install-recommends -y -q default-jdk python3-setuptools python3-dev jq
RUN wget https://bootstrap.pypa.io/get-pip.py && python3 get-pip.py
COPY backend/requirements.txt .
RUN python3 -m pip install -r requirements.txt
@@ -41,23 +41,18 @@ COPY sdk sdk
WORKDIR /go/src/github.com/kubeflow/pipelines/sdk/python
RUN python3 setup.py install
-WORKDIR /samples
-COPY ./samples .
-
-# We need to check that all samples have been compiled without error.
-# For find program, the -exec argument is a filter predicate just like -name. It
-# only affects whether the file is "found", not the find's exit code.
-# One way to solve this problem is to check whether we have any python pipelines
-# that cannot compile. Here the exit code is the number of such files:
-# RUN bash -e -c 'exit $(find . -maxdepth 2 -name "*.py" ! -exec dsl-compile --py {} --output {}.tar.gz \; -print | wc -l)'
-# I think it's better to just use a shell loop though.
-# RUN for pipeline in $(find . -maxdepth 2 -name '*.py' -type f); do dsl-compile --py "$pipeline" --output "$pipeline.tar.gz"; done
-# The "for" loop breaks on all whitespace, so we either need to override IFS or
-# use the "read" command instead.
-RUN line="import kfp;kfp.components.default_base_image_or_builder='gcr.io/google-appengine/python:2020-03-31-141326";\
- set -e; find core tutorials -maxdepth 2 -name '*.py' -type f | while read pipeline; do \
- awk -v text="$line" '!/^#/ && !p {print text; p=1} 1' "$pipeline" && \
- python3 "$pipeline"; \
+WORKDIR /
+COPY ./samples /samples
+COPY backend/src/apiserver/config/sample_config.json /samples/
+
+# Compiling the preloaded samples.
+# The default image is replaced with the GCR-hosted python image.
+RUN set -e; \
+ < /samples/sample_config.json jq .[].file --raw-output | while read pipeline_yaml; do \
+ pipeline_py="${pipeline_yaml%.yaml}"; \
+ mv "$pipeline_py" "${pipeline_py}.tmp"; \
+ echo 'import kfp; kfp.components.default_base_image_or_builder="gcr.io/google-appengine/python:2020-03-31-141326"' | cat - "${pipeline_py}.tmp" > "$pipeline_py"; \
+ python3 "$pipeline_py"; \
done
FROM debian:stretch
diff --git a/samples/core/iris/iris.py b/samples/core/iris/iris.py
index 6509779aae9..2c95a6abdd8 100644
--- a/samples/core/iris/iris.py
+++ b/samples/core/iris/iris.py
@@ -14,10 +14,6 @@
# limitations under the License.
"""Iris flowers example using TFX. Based on https://github.com/tensorflow/tfx/blob/master/tfx/examples/iris/iris_pipeline_native_keras.py"""
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
import os
import kfp
from typing import Text
| allow output artifact store configuration (vs hard coded)
It seems like the output artifacts are always stored in a specific MinIO service, port, namespace, bucket, and set of secrets (`minio-service.kubeflow:9000`).
see: https://github.com/kubeflow/pipelines/blob/f40a22a3f4a8e06d20cf3e3f425b5058d5c87e0b/sdk/python/kfp/compiler/_op_to_template.py#L148
It would be great to make this flexible, e.g. to allow using S3 or changing the namespace or bucket names.
I suggest making it configurable; I can open such a PR if we agree it's needed.
flexible pipeline service (host) path in client SDK
When creating an SDK `Client()`, the path to the `ml-pipeline` API service is loaded from a hard-coded value (`ml-pipeline.kubeflow.svc.cluster.local:8888`) which indicates a specific k8s namespace. It would be valuable to load that default from an env variable, i.e. changing the line in `_client.py` from:
`config.host = host if host else Client.IN_CLUSTER_DNS_NAME`
to:
`config.host = host or os.environ.get('ML_PIPELINE_DNS_NAME',Client.IN_CLUSTER_DNS_NAME)`
Also note that when a user provides the `host` parameter, the IPython output points to the API server and not to the UI service (see the logic in `_get_url_prefix()`); this seems like a potential bug.
If it's acceptable, I can submit a PR for the line change above (a sketch of the fallback follows below).
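For illustration, a minimal sketch of the proposed fallback (the helper name `resolve_api_host` is hypothetical; the env-var name `ML_PIPELINE_DNS_NAME` and the in-cluster default come from the lines above):
```
import os

IN_CLUSTER_DNS_NAME = "ml-pipeline.kubeflow.svc.cluster.local:8888"

def resolve_api_host(host=None):
    # Explicit argument wins, then the environment variable, then the
    # hard-coded in-cluster default.
    return host or os.environ.get("ML_PIPELINE_DNS_NAME", IN_CLUSTER_DNS_NAME)
```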
|
pallets__click-1839 | [
{
"content": "\"\"\"\nThis module contains implementations for the termui module. To keep the\nimport time of Click down, some infrequently used functionality is\nplaced in this module and only imported as needed.\n\"\"\"\nimport contextlib\nimport math\nimport os\nimport sys\nimport time\nfrom gettext import gettext as _\n\nfrom ._compat import _default_text_stdout\nfrom ._compat import CYGWIN\nfrom ._compat import get_best_encoding\nfrom ._compat import isatty\nfrom ._compat import open_stream\nfrom ._compat import strip_ansi\nfrom ._compat import term_len\nfrom ._compat import WIN\nfrom .exceptions import ClickException\nfrom .utils import echo\n\nif os.name == \"nt\":\n BEFORE_BAR = \"\\r\"\n AFTER_BAR = \"\\n\"\nelse:\n BEFORE_BAR = \"\\r\\033[?25l\"\n AFTER_BAR = \"\\033[?25h\\n\"\n\n\ndef _length_hint(obj):\n \"\"\"Returns the length hint of an object.\"\"\"\n try:\n return len(obj)\n except (AttributeError, TypeError):\n try:\n get_hint = type(obj).__length_hint__\n except AttributeError:\n return None\n try:\n hint = get_hint(obj)\n except TypeError:\n return None\n if hint is NotImplemented or not isinstance(hint, int) or hint < 0:\n return None\n return hint\n\n\nclass ProgressBar:\n def __init__(\n self,\n iterable,\n length=None,\n fill_char=\"#\",\n empty_char=\" \",\n bar_template=\"%(bar)s\",\n info_sep=\" \",\n show_eta=True,\n show_percent=None,\n show_pos=False,\n item_show_func=None,\n label=None,\n file=None,\n color=None,\n update_min_steps=1,\n width=30,\n ):\n self.fill_char = fill_char\n self.empty_char = empty_char\n self.bar_template = bar_template\n self.info_sep = info_sep\n self.show_eta = show_eta\n self.show_percent = show_percent\n self.show_pos = show_pos\n self.item_show_func = item_show_func\n self.label = label or \"\"\n if file is None:\n file = _default_text_stdout()\n self.file = file\n self.color = color\n self.update_min_steps = update_min_steps\n self._completed_intervals = 0\n self.width = width\n self.autowidth = width == 0\n\n if length is None:\n length = _length_hint(iterable)\n if iterable is None:\n if length is None:\n raise TypeError(\"iterable or length is required\")\n iterable = range(length)\n self.iter = iter(iterable)\n self.length = length\n self.length_known = length is not None\n self.pos = 0\n self.avg = []\n self.start = self.last_eta = time.time()\n self.eta_known = False\n self.finished = False\n self.max_width = None\n self.entered = False\n self.current_item = None\n self.is_hidden = not isatty(self.file)\n self._last_line = None\n\n def __enter__(self):\n self.entered = True\n self.render_progress()\n return self\n\n def __exit__(self, exc_type, exc_value, tb):\n self.render_finish()\n\n def __iter__(self):\n if not self.entered:\n raise RuntimeError(\"You need to use progress bars in a with block.\")\n self.render_progress()\n return self.generator()\n\n def __next__(self):\n # Iteration is defined in terms of a generator function,\n # returned by iter(self); use that to define next(). This works\n # because `self.iter` is an iterable consumed by that generator,\n # so it is re-entry safe. 
Calling `next(self.generator())`\n # twice works and does \"what you want\".\n return next(iter(self))\n\n def render_finish(self):\n if self.is_hidden:\n return\n self.file.write(AFTER_BAR)\n self.file.flush()\n\n @property\n def pct(self):\n if self.finished:\n return 1.0\n return min(self.pos / (float(self.length) or 1), 1.0)\n\n @property\n def time_per_iteration(self):\n if not self.avg:\n return 0.0\n return sum(self.avg) / float(len(self.avg))\n\n @property\n def eta(self):\n if self.length_known and not self.finished:\n return self.time_per_iteration * (self.length - self.pos)\n return 0.0\n\n def format_eta(self):\n if self.eta_known:\n t = int(self.eta)\n seconds = t % 60\n t //= 60\n minutes = t % 60\n t //= 60\n hours = t % 24\n t //= 24\n if t > 0:\n return f\"{t}d {hours:02}:{minutes:02}:{seconds:02}\"\n else:\n return f\"{hours:02}:{minutes:02}:{seconds:02}\"\n return \"\"\n\n def format_pos(self):\n pos = str(self.pos)\n if self.length_known:\n pos += f\"/{self.length}\"\n return pos\n\n def format_pct(self):\n return f\"{int(self.pct * 100): 4}%\"[1:]\n\n def format_bar(self):\n if self.length_known:\n bar_length = int(self.pct * self.width)\n bar = self.fill_char * bar_length\n bar += self.empty_char * (self.width - bar_length)\n elif self.finished:\n bar = self.fill_char * self.width\n else:\n bar = list(self.empty_char * (self.width or 1))\n if self.time_per_iteration != 0:\n bar[\n int(\n (math.cos(self.pos * self.time_per_iteration) / 2.0 + 0.5)\n * self.width\n )\n ] = self.fill_char\n bar = \"\".join(bar)\n return bar\n\n def format_progress_line(self):\n show_percent = self.show_percent\n\n info_bits = []\n if self.length_known and show_percent is None:\n show_percent = not self.show_pos\n\n if self.show_pos:\n info_bits.append(self.format_pos())\n if show_percent:\n info_bits.append(self.format_pct())\n if self.show_eta and self.eta_known and not self.finished:\n info_bits.append(self.format_eta())\n if self.item_show_func is not None:\n item_info = self.item_show_func(self.current_item)\n if item_info is not None:\n info_bits.append(item_info)\n\n return (\n self.bar_template\n % {\n \"label\": self.label,\n \"bar\": self.format_bar(),\n \"info\": self.info_sep.join(info_bits),\n }\n ).rstrip()\n\n def render_progress(self):\n import shutil\n\n if self.is_hidden:\n # Only output the label as it changes if the output is not a\n # TTY. 
Use file=stderr if you expect to be piping stdout.\n if self._last_line != self.label:\n self._last_line = self.label\n echo(self.label, file=self.file, color=self.color)\n\n return\n\n buf = []\n # Update width in case the terminal has been resized\n if self.autowidth:\n old_width = self.width\n self.width = 0\n clutter_length = term_len(self.format_progress_line())\n new_width = max(0, shutil.get_terminal_size().columns - clutter_length)\n if new_width < old_width:\n buf.append(BEFORE_BAR)\n buf.append(\" \" * self.max_width)\n self.max_width = new_width\n self.width = new_width\n\n clear_width = self.width\n if self.max_width is not None:\n clear_width = self.max_width\n\n buf.append(BEFORE_BAR)\n line = self.format_progress_line()\n line_len = term_len(line)\n if self.max_width is None or self.max_width < line_len:\n self.max_width = line_len\n\n buf.append(line)\n buf.append(\" \" * (clear_width - line_len))\n line = \"\".join(buf)\n # Render the line only if it changed.\n\n if line != self._last_line:\n self._last_line = line\n echo(line, file=self.file, color=self.color, nl=False)\n self.file.flush()\n\n def make_step(self, n_steps):\n self.pos += n_steps\n if self.length_known and self.pos >= self.length:\n self.finished = True\n\n if (time.time() - self.last_eta) < 1.0:\n return\n\n self.last_eta = time.time()\n\n # self.avg is a rolling list of length <= 7 of steps where steps are\n # defined as time elapsed divided by the total progress through\n # self.length.\n if self.pos:\n step = (time.time() - self.start) / self.pos\n else:\n step = time.time() - self.start\n\n self.avg = self.avg[-6:] + [step]\n\n self.eta_known = self.length_known\n\n def update(self, n_steps, current_item=None):\n \"\"\"Update the progress bar by advancing a specified number of\n steps, and optionally set the ``current_item`` for this new\n position.\n\n :param n_steps: Number of steps to advance.\n :param current_item: Optional item to set as ``current_item``\n for the updated position.\n\n .. versionchanged:: 8.0\n Added the ``current_item`` optional parameter.\n\n .. versionchanged:: 8.0\n Only render when the number of steps meets the\n ``update_min_steps`` threshold.\n \"\"\"\n if current_item is not None:\n self.current_item = current_item\n\n self._completed_intervals += n_steps\n\n if self._completed_intervals >= self.update_min_steps:\n self.make_step(self._completed_intervals)\n self.render_progress()\n self._completed_intervals = 0\n\n def finish(self):\n self.eta_known = 0\n self.current_item = None\n self.finished = True\n\n def generator(self):\n \"\"\"Return a generator which yields the items added to the bar\n during construction, and updates the progress bar *after* the\n yielded block returns.\n \"\"\"\n # WARNING: the iterator interface for `ProgressBar` relies on\n # this and only works because this is a simple generator which\n # doesn't create or manage additional state. If this function\n # changes, the impact should be evaluated both against\n # `iter(bar)` and `next(bar)`. `next()` in particular may call\n # `self.generator()` repeatedly, and this must remain safe in\n # order for that interface to work.\n if not self.entered:\n raise RuntimeError(\"You need to use progress bars in a with block.\")\n\n if self.is_hidden:\n yield from self.iter\n else:\n for rv in self.iter:\n self.current_item = rv\n\n # This allows show_item_func to be updated before the\n # item is processed. 
Only trigger at the beginning of\n # the update interval.\n if self._completed_intervals == 0:\n self.render_progress()\n\n yield rv\n self.update(1)\n\n self.finish()\n self.render_progress()\n\n\ndef pager(generator, color=None):\n \"\"\"Decide what method to use for paging through text.\"\"\"\n stdout = _default_text_stdout()\n if not isatty(sys.stdin) or not isatty(stdout):\n return _nullpager(stdout, generator, color)\n pager_cmd = (os.environ.get(\"PAGER\", None) or \"\").strip()\n if pager_cmd:\n if WIN:\n return _tempfilepager(generator, pager_cmd, color)\n return _pipepager(generator, pager_cmd, color)\n if os.environ.get(\"TERM\") in (\"dumb\", \"emacs\"):\n return _nullpager(stdout, generator, color)\n if WIN or sys.platform.startswith(\"os2\"):\n return _tempfilepager(generator, \"more <\", color)\n if hasattr(os, \"system\") and os.system(\"(less) 2>/dev/null\") == 0:\n return _pipepager(generator, \"less\", color)\n\n import tempfile\n\n fd, filename = tempfile.mkstemp()\n os.close(fd)\n try:\n if hasattr(os, \"system\") and os.system(f'more \"{filename}\"') == 0:\n return _pipepager(generator, \"more\", color)\n return _nullpager(stdout, generator, color)\n finally:\n os.unlink(filename)\n\n\ndef _pipepager(generator, cmd, color):\n \"\"\"Page through text by feeding it to another program. Invoking a\n pager through this might support colors.\n \"\"\"\n import subprocess\n\n env = dict(os.environ)\n\n # If we're piping to less we might support colors under the\n # condition that\n cmd_detail = cmd.rsplit(\"/\", 1)[-1].split()\n if color is None and cmd_detail[0] == \"less\":\n less_flags = f\"{os.environ.get('LESS', '')}{' '.join(cmd_detail[1:])}\"\n if not less_flags:\n env[\"LESS\"] = \"-R\"\n color = True\n elif \"r\" in less_flags or \"R\" in less_flags:\n color = True\n\n c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, env=env)\n encoding = get_best_encoding(c.stdin)\n try:\n for text in generator:\n if not color:\n text = strip_ansi(text)\n\n c.stdin.write(text.encode(encoding, \"replace\"))\n except (OSError, KeyboardInterrupt):\n pass\n else:\n c.stdin.close()\n\n # Less doesn't respect ^C, but catches it for its own UI purposes (aborting\n # search or other commands inside less).\n #\n # That means when the user hits ^C, the parent process (click) terminates,\n # but less is still alive, paging the output and messing up the terminal.\n #\n # If the user wants to make the pager exit on ^C, they should set\n # `LESS='-K'`. It's not our decision to make.\n while True:\n try:\n c.wait()\n except KeyboardInterrupt:\n pass\n else:\n break\n\n\ndef _tempfilepager(generator, cmd, color):\n \"\"\"Page through text by invoking a program on a temporary file.\"\"\"\n import tempfile\n\n filename = tempfile.mkstemp()\n # TODO: This never terminates if the passed generator never terminates.\n text = \"\".join(generator)\n if not color:\n text = strip_ansi(text)\n encoding = get_best_encoding(sys.stdout)\n with open_stream(filename, \"wb\")[0] as f:\n f.write(text.encode(encoding))\n try:\n os.system(f'{cmd} \"{filename}\"')\n finally:\n os.unlink(filename)\n\n\ndef _nullpager(stream, generator, color):\n \"\"\"Simply print unformatted text. 
This is the ultimate fallback.\"\"\"\n for text in generator:\n if not color:\n text = strip_ansi(text)\n stream.write(text)\n\n\nclass Editor:\n def __init__(self, editor=None, env=None, require_save=True, extension=\".txt\"):\n self.editor = editor\n self.env = env\n self.require_save = require_save\n self.extension = extension\n\n def get_editor(self):\n if self.editor is not None:\n return self.editor\n for key in \"VISUAL\", \"EDITOR\":\n rv = os.environ.get(key)\n if rv:\n return rv\n if WIN:\n return \"notepad\"\n for editor in \"sensible-editor\", \"vim\", \"nano\":\n if os.system(f\"which {editor} >/dev/null 2>&1\") == 0:\n return editor\n return \"vi\"\n\n def edit_file(self, filename):\n import subprocess\n\n editor = self.get_editor()\n if self.env:\n environ = os.environ.copy()\n environ.update(self.env)\n else:\n environ = None\n try:\n c = subprocess.Popen(f'{editor} \"{filename}\"', env=environ, shell=True)\n exit_code = c.wait()\n if exit_code != 0:\n raise ClickException(\n _(\"{editor}: Editing failed\").format(editor=editor)\n )\n except OSError as e:\n raise ClickException(\n _(\"{editor}: Editing failed: {e}\").format(editor=editor, e=e)\n )\n\n def edit(self, text):\n import tempfile\n\n if not text:\n text = \"\"\n\n is_bytes = isinstance(text, (bytes, bytearray))\n\n if not is_bytes:\n if text and not text.endswith(\"\\n\"):\n text += \"\\n\"\n\n if WIN:\n text = text.replace(\"\\n\", \"\\r\\n\").encode(\"utf-8-sig\")\n else:\n text = text.encode(\"utf-8\")\n\n fd, name = tempfile.mkstemp(prefix=\"editor-\", suffix=self.extension)\n\n try:\n with os.fdopen(fd, \"wb\") as f:\n f.write(text)\n\n # If the filesystem resolution is 1 second, like Mac OS\n # 10.12 Extended, or 2 seconds, like FAT32, and the editor\n # closes very fast, require_save can fail. 
Set the modified\n # time to be 2 seconds in the past to work around this.\n os.utime(name, (os.path.getatime(name), os.path.getmtime(name) - 2))\n # Depending on the resolution, the exact value might not be\n # recorded, so get the new recorded value.\n timestamp = os.path.getmtime(name)\n\n self.edit_file(name)\n\n if self.require_save and os.path.getmtime(name) == timestamp:\n return None\n\n with open(name, \"rb\") as f:\n rv = f.read()\n\n if is_bytes:\n return rv\n\n return rv.decode(\"utf-8-sig\").replace(\"\\r\\n\", \"\\n\")\n finally:\n os.unlink(name)\n\n\ndef open_url(url, wait=False, locate=False):\n import subprocess\n\n def _unquote_file(url):\n import urllib\n\n if url.startswith(\"file://\"):\n url = urllib.unquote(url[7:])\n return url\n\n if sys.platform == \"darwin\":\n args = [\"open\"]\n if wait:\n args.append(\"-W\")\n if locate:\n args.append(\"-R\")\n args.append(_unquote_file(url))\n null = open(\"/dev/null\", \"w\")\n try:\n return subprocess.Popen(args, stderr=null).wait()\n finally:\n null.close()\n elif WIN:\n if locate:\n url = _unquote_file(url.replace('\"', \"\"))\n args = f'explorer /select,\"{url}\"'\n else:\n url = url.replace('\"', \"\")\n wait = \"/WAIT\" if wait else \"\"\n args = f'start {wait} \"\" \"{url}\"'\n return os.system(args)\n elif CYGWIN:\n if locate:\n url = os.path.dirname(_unquote_file(url).replace('\"', \"\"))\n args = f'cygstart \"{url}\"'\n else:\n url = url.replace('\"', \"\")\n wait = \"-w\" if wait else \"\"\n args = f'cygstart {wait} \"{url}\"'\n return os.system(args)\n\n try:\n if locate:\n url = os.path.dirname(_unquote_file(url)) or \".\"\n else:\n url = _unquote_file(url)\n c = subprocess.Popen([\"xdg-open\", url])\n if wait:\n return c.wait()\n return 0\n except OSError:\n if url.startswith((\"http://\", \"https://\")) and not locate and not wait:\n import webbrowser\n\n webbrowser.open(url)\n return 0\n return 1\n\n\ndef _translate_ch_to_exc(ch):\n if ch == \"\\x03\":\n raise KeyboardInterrupt()\n if ch == \"\\x04\" and not WIN: # Unix-like, Ctrl+D\n raise EOFError()\n if ch == \"\\x1a\" and WIN: # Windows, Ctrl+Z\n raise EOFError()\n\n\nif WIN:\n import msvcrt\n\n @contextlib.contextmanager\n def raw_terminal():\n yield\n\n def getchar(echo):\n # The function `getch` will return a bytes object corresponding to\n # the pressed character. Since Windows 10 build 1803, it will also\n # return \\x00 when called a second time after pressing a regular key.\n #\n # `getwch` does not share this probably-bugged behavior. Moreover, it\n # returns a Unicode object by default, which is what we want.\n #\n # Either of these functions will return \\x00 or \\xe0 to indicate\n # a special key, and you need to call the same function again to get\n # the \"rest\" of the code. The fun part is that \\u00e0 is\n # \"latin small letter a with grave\", so if you type that on a French\n # keyboard, you _also_ get a \\xe0.\n # E.g., consider the Up arrow. This returns \\xe0 and then \\x48. 
The\n # resulting Unicode string reads as \"a with grave\" + \"capital H\".\n # This is indistinguishable from when the user actually types\n # \"a with grave\" and then \"capital H\".\n #\n # When \\xe0 is returned, we assume it's part of a special-key sequence\n # and call `getwch` again, but that means that when the user types\n # the \\u00e0 character, `getchar` doesn't return until a second\n # character is typed.\n # The alternative is returning immediately, but that would mess up\n # cross-platform handling of arrow keys and others that start with\n # \\xe0. Another option is using `getch`, but then we can't reliably\n # read non-ASCII characters, because return values of `getch` are\n # limited to the current 8-bit codepage.\n #\n # Anyway, Click doesn't claim to do this Right(tm), and using `getwch`\n # is doing the right thing in more situations than with `getch`.\n if echo:\n func = msvcrt.getwche\n else:\n func = msvcrt.getwch\n\n rv = func()\n if rv in (\"\\x00\", \"\\xe0\"):\n # \\x00 and \\xe0 are control characters that indicate special key,\n # see above.\n rv += func()\n _translate_ch_to_exc(rv)\n return rv\n\n\nelse:\n import tty\n import termios\n\n @contextlib.contextmanager\n def raw_terminal():\n if not isatty(sys.stdin):\n f = open(\"/dev/tty\")\n fd = f.fileno()\n else:\n fd = sys.stdin.fileno()\n f = None\n try:\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(fd)\n yield fd\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n sys.stdout.flush()\n if f is not None:\n f.close()\n except termios.error:\n pass\n\n def getchar(echo):\n with raw_terminal() as fd:\n ch = os.read(fd, 32)\n ch = ch.decode(get_best_encoding(sys.stdin), \"replace\")\n if echo and isatty(sys.stdout):\n sys.stdout.write(ch)\n _translate_ch_to_exc(ch)\n return ch\n",
"path": "src/click/_termui_impl.py"
}
] | [
{
"content": "\"\"\"\nThis module contains implementations for the termui module. To keep the\nimport time of Click down, some infrequently used functionality is\nplaced in this module and only imported as needed.\n\"\"\"\nimport contextlib\nimport math\nimport os\nimport sys\nimport time\nfrom gettext import gettext as _\n\nfrom ._compat import _default_text_stdout\nfrom ._compat import CYGWIN\nfrom ._compat import get_best_encoding\nfrom ._compat import isatty\nfrom ._compat import open_stream\nfrom ._compat import strip_ansi\nfrom ._compat import term_len\nfrom ._compat import WIN\nfrom .exceptions import ClickException\nfrom .utils import echo\n\nif os.name == \"nt\":\n BEFORE_BAR = \"\\r\"\n AFTER_BAR = \"\\n\"\nelse:\n BEFORE_BAR = \"\\r\\033[?25l\"\n AFTER_BAR = \"\\033[?25h\\n\"\n\n\ndef _length_hint(obj):\n \"\"\"Returns the length hint of an object.\"\"\"\n try:\n return len(obj)\n except (AttributeError, TypeError):\n try:\n get_hint = type(obj).__length_hint__\n except AttributeError:\n return None\n try:\n hint = get_hint(obj)\n except TypeError:\n return None\n if hint is NotImplemented or not isinstance(hint, int) or hint < 0:\n return None\n return hint\n\n\nclass ProgressBar:\n def __init__(\n self,\n iterable,\n length=None,\n fill_char=\"#\",\n empty_char=\" \",\n bar_template=\"%(bar)s\",\n info_sep=\" \",\n show_eta=True,\n show_percent=None,\n show_pos=False,\n item_show_func=None,\n label=None,\n file=None,\n color=None,\n update_min_steps=1,\n width=30,\n ):\n self.fill_char = fill_char\n self.empty_char = empty_char\n self.bar_template = bar_template\n self.info_sep = info_sep\n self.show_eta = show_eta\n self.show_percent = show_percent\n self.show_pos = show_pos\n self.item_show_func = item_show_func\n self.label = label or \"\"\n if file is None:\n file = _default_text_stdout()\n self.file = file\n self.color = color\n self.update_min_steps = update_min_steps\n self._completed_intervals = 0\n self.width = width\n self.autowidth = width == 0\n\n if length is None:\n length = _length_hint(iterable)\n if iterable is None:\n if length is None:\n raise TypeError(\"iterable or length is required\")\n iterable = range(length)\n self.iter = iter(iterable)\n self.length = length\n self.length_known = length is not None\n self.pos = 0\n self.avg = []\n self.start = self.last_eta = time.time()\n self.eta_known = False\n self.finished = False\n self.max_width = None\n self.entered = False\n self.current_item = None\n self.is_hidden = not isatty(self.file)\n self._last_line = None\n\n def __enter__(self):\n self.entered = True\n self.render_progress()\n return self\n\n def __exit__(self, exc_type, exc_value, tb):\n self.render_finish()\n\n def __iter__(self):\n if not self.entered:\n raise RuntimeError(\"You need to use progress bars in a with block.\")\n self.render_progress()\n return self.generator()\n\n def __next__(self):\n # Iteration is defined in terms of a generator function,\n # returned by iter(self); use that to define next(). This works\n # because `self.iter` is an iterable consumed by that generator,\n # so it is re-entry safe. 
Calling `next(self.generator())`\n # twice works and does \"what you want\".\n return next(iter(self))\n\n def render_finish(self):\n if self.is_hidden:\n return\n self.file.write(AFTER_BAR)\n self.file.flush()\n\n @property\n def pct(self):\n if self.finished:\n return 1.0\n return min(self.pos / (float(self.length) or 1), 1.0)\n\n @property\n def time_per_iteration(self):\n if not self.avg:\n return 0.0\n return sum(self.avg) / float(len(self.avg))\n\n @property\n def eta(self):\n if self.length_known and not self.finished:\n return self.time_per_iteration * (self.length - self.pos)\n return 0.0\n\n def format_eta(self):\n if self.eta_known:\n t = int(self.eta)\n seconds = t % 60\n t //= 60\n minutes = t % 60\n t //= 60\n hours = t % 24\n t //= 24\n if t > 0:\n return f\"{t}d {hours:02}:{minutes:02}:{seconds:02}\"\n else:\n return f\"{hours:02}:{minutes:02}:{seconds:02}\"\n return \"\"\n\n def format_pos(self):\n pos = str(self.pos)\n if self.length_known:\n pos += f\"/{self.length}\"\n return pos\n\n def format_pct(self):\n return f\"{int(self.pct * 100): 4}%\"[1:]\n\n def format_bar(self):\n if self.length_known:\n bar_length = int(self.pct * self.width)\n bar = self.fill_char * bar_length\n bar += self.empty_char * (self.width - bar_length)\n elif self.finished:\n bar = self.fill_char * self.width\n else:\n bar = list(self.empty_char * (self.width or 1))\n if self.time_per_iteration != 0:\n bar[\n int(\n (math.cos(self.pos * self.time_per_iteration) / 2.0 + 0.5)\n * self.width\n )\n ] = self.fill_char\n bar = \"\".join(bar)\n return bar\n\n def format_progress_line(self):\n show_percent = self.show_percent\n\n info_bits = []\n if self.length_known and show_percent is None:\n show_percent = not self.show_pos\n\n if self.show_pos:\n info_bits.append(self.format_pos())\n if show_percent:\n info_bits.append(self.format_pct())\n if self.show_eta and self.eta_known and not self.finished:\n info_bits.append(self.format_eta())\n if self.item_show_func is not None:\n item_info = self.item_show_func(self.current_item)\n if item_info is not None:\n info_bits.append(item_info)\n\n return (\n self.bar_template\n % {\n \"label\": self.label,\n \"bar\": self.format_bar(),\n \"info\": self.info_sep.join(info_bits),\n }\n ).rstrip()\n\n def render_progress(self):\n import shutil\n\n if self.is_hidden:\n # Only output the label as it changes if the output is not a\n # TTY. 
Use file=stderr if you expect to be piping stdout.\n if self._last_line != self.label:\n self._last_line = self.label\n echo(self.label, file=self.file, color=self.color)\n\n return\n\n buf = []\n # Update width in case the terminal has been resized\n if self.autowidth:\n old_width = self.width\n self.width = 0\n clutter_length = term_len(self.format_progress_line())\n new_width = max(0, shutil.get_terminal_size().columns - clutter_length)\n if new_width < old_width:\n buf.append(BEFORE_BAR)\n buf.append(\" \" * self.max_width)\n self.max_width = new_width\n self.width = new_width\n\n clear_width = self.width\n if self.max_width is not None:\n clear_width = self.max_width\n\n buf.append(BEFORE_BAR)\n line = self.format_progress_line()\n line_len = term_len(line)\n if self.max_width is None or self.max_width < line_len:\n self.max_width = line_len\n\n buf.append(line)\n buf.append(\" \" * (clear_width - line_len))\n line = \"\".join(buf)\n # Render the line only if it changed.\n\n if line != self._last_line:\n self._last_line = line\n echo(line, file=self.file, color=self.color, nl=False)\n self.file.flush()\n\n def make_step(self, n_steps):\n self.pos += n_steps\n if self.length_known and self.pos >= self.length:\n self.finished = True\n\n if (time.time() - self.last_eta) < 1.0:\n return\n\n self.last_eta = time.time()\n\n # self.avg is a rolling list of length <= 7 of steps where steps are\n # defined as time elapsed divided by the total progress through\n # self.length.\n if self.pos:\n step = (time.time() - self.start) / self.pos\n else:\n step = time.time() - self.start\n\n self.avg = self.avg[-6:] + [step]\n\n self.eta_known = self.length_known\n\n def update(self, n_steps, current_item=None):\n \"\"\"Update the progress bar by advancing a specified number of\n steps, and optionally set the ``current_item`` for this new\n position.\n\n :param n_steps: Number of steps to advance.\n :param current_item: Optional item to set as ``current_item``\n for the updated position.\n\n .. versionchanged:: 8.0\n Added the ``current_item`` optional parameter.\n\n .. versionchanged:: 8.0\n Only render when the number of steps meets the\n ``update_min_steps`` threshold.\n \"\"\"\n if current_item is not None:\n self.current_item = current_item\n\n self._completed_intervals += n_steps\n\n if self._completed_intervals >= self.update_min_steps:\n self.make_step(self._completed_intervals)\n self.render_progress()\n self._completed_intervals = 0\n\n def finish(self):\n self.eta_known = 0\n self.current_item = None\n self.finished = True\n\n def generator(self):\n \"\"\"Return a generator which yields the items added to the bar\n during construction, and updates the progress bar *after* the\n yielded block returns.\n \"\"\"\n # WARNING: the iterator interface for `ProgressBar` relies on\n # this and only works because this is a simple generator which\n # doesn't create or manage additional state. If this function\n # changes, the impact should be evaluated both against\n # `iter(bar)` and `next(bar)`. `next()` in particular may call\n # `self.generator()` repeatedly, and this must remain safe in\n # order for that interface to work.\n if not self.entered:\n raise RuntimeError(\"You need to use progress bars in a with block.\")\n\n if self.is_hidden:\n yield from self.iter\n else:\n for rv in self.iter:\n self.current_item = rv\n\n # This allows show_item_func to be updated before the\n # item is processed. 
Only trigger at the beginning of\n # the update interval.\n if self._completed_intervals == 0:\n self.render_progress()\n\n yield rv\n self.update(1)\n\n self.finish()\n self.render_progress()\n\n\ndef pager(generator, color=None):\n \"\"\"Decide what method to use for paging through text.\"\"\"\n stdout = _default_text_stdout()\n if not isatty(sys.stdin) or not isatty(stdout):\n return _nullpager(stdout, generator, color)\n pager_cmd = (os.environ.get(\"PAGER\", None) or \"\").strip()\n if pager_cmd:\n if WIN:\n return _tempfilepager(generator, pager_cmd, color)\n return _pipepager(generator, pager_cmd, color)\n if os.environ.get(\"TERM\") in (\"dumb\", \"emacs\"):\n return _nullpager(stdout, generator, color)\n if WIN or sys.platform.startswith(\"os2\"):\n return _tempfilepager(generator, \"more <\", color)\n if hasattr(os, \"system\") and os.system(\"(less) 2>/dev/null\") == 0:\n return _pipepager(generator, \"less\", color)\n\n import tempfile\n\n fd, filename = tempfile.mkstemp()\n os.close(fd)\n try:\n if hasattr(os, \"system\") and os.system(f'more \"{filename}\"') == 0:\n return _pipepager(generator, \"more\", color)\n return _nullpager(stdout, generator, color)\n finally:\n os.unlink(filename)\n\n\ndef _pipepager(generator, cmd, color):\n \"\"\"Page through text by feeding it to another program. Invoking a\n pager through this might support colors.\n \"\"\"\n import subprocess\n\n env = dict(os.environ)\n\n # If we're piping to less we might support colors under the\n # condition that\n cmd_detail = cmd.rsplit(\"/\", 1)[-1].split()\n if color is None and cmd_detail[0] == \"less\":\n less_flags = f\"{os.environ.get('LESS', '')}{' '.join(cmd_detail[1:])}\"\n if not less_flags:\n env[\"LESS\"] = \"-R\"\n color = True\n elif \"r\" in less_flags or \"R\" in less_flags:\n color = True\n\n c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, env=env)\n encoding = get_best_encoding(c.stdin)\n try:\n for text in generator:\n if not color:\n text = strip_ansi(text)\n\n c.stdin.write(text.encode(encoding, \"replace\"))\n except (OSError, KeyboardInterrupt):\n pass\n else:\n c.stdin.close()\n\n # Less doesn't respect ^C, but catches it for its own UI purposes (aborting\n # search or other commands inside less).\n #\n # That means when the user hits ^C, the parent process (click) terminates,\n # but less is still alive, paging the output and messing up the terminal.\n #\n # If the user wants to make the pager exit on ^C, they should set\n # `LESS='-K'`. It's not our decision to make.\n while True:\n try:\n c.wait()\n except KeyboardInterrupt:\n pass\n else:\n break\n\n\ndef _tempfilepager(generator, cmd, color):\n \"\"\"Page through text by invoking a program on a temporary file.\"\"\"\n import tempfile\n\n filename = tempfile.mkstemp()\n # TODO: This never terminates if the passed generator never terminates.\n text = \"\".join(generator)\n if not color:\n text = strip_ansi(text)\n encoding = get_best_encoding(sys.stdout)\n with open_stream(filename, \"wb\")[0] as f:\n f.write(text.encode(encoding))\n try:\n os.system(f'{cmd} \"{filename}\"')\n finally:\n os.unlink(filename)\n\n\ndef _nullpager(stream, generator, color):\n \"\"\"Simply print unformatted text. 
This is the ultimate fallback.\"\"\"\n for text in generator:\n if not color:\n text = strip_ansi(text)\n stream.write(text)\n\n\nclass Editor:\n def __init__(self, editor=None, env=None, require_save=True, extension=\".txt\"):\n self.editor = editor\n self.env = env\n self.require_save = require_save\n self.extension = extension\n\n def get_editor(self):\n if self.editor is not None:\n return self.editor\n for key in \"VISUAL\", \"EDITOR\":\n rv = os.environ.get(key)\n if rv:\n return rv\n if WIN:\n return \"notepad\"\n for editor in \"sensible-editor\", \"vim\", \"nano\":\n if os.system(f\"which {editor} >/dev/null 2>&1\") == 0:\n return editor\n return \"vi\"\n\n def edit_file(self, filename):\n import subprocess\n\n editor = self.get_editor()\n if self.env:\n environ = os.environ.copy()\n environ.update(self.env)\n else:\n environ = None\n try:\n c = subprocess.Popen(f'{editor} \"{filename}\"', env=environ, shell=True)\n exit_code = c.wait()\n if exit_code != 0:\n raise ClickException(\n _(\"{editor}: Editing failed\").format(editor=editor)\n )\n except OSError as e:\n raise ClickException(\n _(\"{editor}: Editing failed: {e}\").format(editor=editor, e=e)\n )\n\n def edit(self, text):\n import tempfile\n\n if not text:\n text = \"\"\n\n is_bytes = isinstance(text, (bytes, bytearray))\n\n if not is_bytes:\n if text and not text.endswith(\"\\n\"):\n text += \"\\n\"\n\n if WIN:\n text = text.replace(\"\\n\", \"\\r\\n\").encode(\"utf-8-sig\")\n else:\n text = text.encode(\"utf-8\")\n\n fd, name = tempfile.mkstemp(prefix=\"editor-\", suffix=self.extension)\n\n try:\n with os.fdopen(fd, \"wb\") as f:\n f.write(text)\n\n # If the filesystem resolution is 1 second, like Mac OS\n # 10.12 Extended, or 2 seconds, like FAT32, and the editor\n # closes very fast, require_save can fail. 
Set the modified\n # time to be 2 seconds in the past to work around this.\n os.utime(name, (os.path.getatime(name), os.path.getmtime(name) - 2))\n # Depending on the resolution, the exact value might not be\n # recorded, so get the new recorded value.\n timestamp = os.path.getmtime(name)\n\n self.edit_file(name)\n\n if self.require_save and os.path.getmtime(name) == timestamp:\n return None\n\n with open(name, \"rb\") as f:\n rv = f.read()\n\n if is_bytes:\n return rv\n\n return rv.decode(\"utf-8-sig\").replace(\"\\r\\n\", \"\\n\")\n finally:\n os.unlink(name)\n\n\ndef open_url(url, wait=False, locate=False):\n import subprocess\n\n def _unquote_file(url: str) -> str:\n from urllib.parse import unquote\n\n if url.startswith(\"file://\"):\n url = unquote(url[7:])\n\n return url\n\n if sys.platform == \"darwin\":\n args = [\"open\"]\n if wait:\n args.append(\"-W\")\n if locate:\n args.append(\"-R\")\n args.append(_unquote_file(url))\n null = open(\"/dev/null\", \"w\")\n try:\n return subprocess.Popen(args, stderr=null).wait()\n finally:\n null.close()\n elif WIN:\n if locate:\n url = _unquote_file(url.replace('\"', \"\"))\n args = f'explorer /select,\"{url}\"'\n else:\n url = url.replace('\"', \"\")\n wait = \"/WAIT\" if wait else \"\"\n args = f'start {wait} \"\" \"{url}\"'\n return os.system(args)\n elif CYGWIN:\n if locate:\n url = os.path.dirname(_unquote_file(url).replace('\"', \"\"))\n args = f'cygstart \"{url}\"'\n else:\n url = url.replace('\"', \"\")\n wait = \"-w\" if wait else \"\"\n args = f'cygstart {wait} \"{url}\"'\n return os.system(args)\n\n try:\n if locate:\n url = os.path.dirname(_unquote_file(url)) or \".\"\n else:\n url = _unquote_file(url)\n c = subprocess.Popen([\"xdg-open\", url])\n if wait:\n return c.wait()\n return 0\n except OSError:\n if url.startswith((\"http://\", \"https://\")) and not locate and not wait:\n import webbrowser\n\n webbrowser.open(url)\n return 0\n return 1\n\n\ndef _translate_ch_to_exc(ch):\n if ch == \"\\x03\":\n raise KeyboardInterrupt()\n if ch == \"\\x04\" and not WIN: # Unix-like, Ctrl+D\n raise EOFError()\n if ch == \"\\x1a\" and WIN: # Windows, Ctrl+Z\n raise EOFError()\n\n\nif WIN:\n import msvcrt\n\n @contextlib.contextmanager\n def raw_terminal():\n yield\n\n def getchar(echo):\n # The function `getch` will return a bytes object corresponding to\n # the pressed character. Since Windows 10 build 1803, it will also\n # return \\x00 when called a second time after pressing a regular key.\n #\n # `getwch` does not share this probably-bugged behavior. Moreover, it\n # returns a Unicode object by default, which is what we want.\n #\n # Either of these functions will return \\x00 or \\xe0 to indicate\n # a special key, and you need to call the same function again to get\n # the \"rest\" of the code. The fun part is that \\u00e0 is\n # \"latin small letter a with grave\", so if you type that on a French\n # keyboard, you _also_ get a \\xe0.\n # E.g., consider the Up arrow. This returns \\xe0 and then \\x48. 
The\n # resulting Unicode string reads as \"a with grave\" + \"capital H\".\n # This is indistinguishable from when the user actually types\n # \"a with grave\" and then \"capital H\".\n #\n # When \\xe0 is returned, we assume it's part of a special-key sequence\n # and call `getwch` again, but that means that when the user types\n # the \\u00e0 character, `getchar` doesn't return until a second\n # character is typed.\n # The alternative is returning immediately, but that would mess up\n # cross-platform handling of arrow keys and others that start with\n # \\xe0. Another option is using `getch`, but then we can't reliably\n # read non-ASCII characters, because return values of `getch` are\n # limited to the current 8-bit codepage.\n #\n # Anyway, Click doesn't claim to do this Right(tm), and using `getwch`\n # is doing the right thing in more situations than with `getch`.\n if echo:\n func = msvcrt.getwche\n else:\n func = msvcrt.getwch\n\n rv = func()\n if rv in (\"\\x00\", \"\\xe0\"):\n # \\x00 and \\xe0 are control characters that indicate special key,\n # see above.\n rv += func()\n _translate_ch_to_exc(rv)\n return rv\n\n\nelse:\n import tty\n import termios\n\n @contextlib.contextmanager\n def raw_terminal():\n if not isatty(sys.stdin):\n f = open(\"/dev/tty\")\n fd = f.fileno()\n else:\n fd = sys.stdin.fileno()\n f = None\n try:\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(fd)\n yield fd\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n sys.stdout.flush()\n if f is not None:\n f.close()\n except termios.error:\n pass\n\n def getchar(echo):\n with raw_terminal() as fd:\n ch = os.read(fd, 32)\n ch = ch.decode(get_best_encoding(sys.stdin), \"replace\")\n if echo and isatty(sys.stdout):\n sys.stdout.write(ch)\n _translate_ch_to_exc(ch)\n return ch\n",
"path": "src/click/_termui_impl.py"
}
] | diff --git a/src/click/_termui_impl.py b/src/click/_termui_impl.py
index 0e9860bd8..46ff2190d 100644
--- a/src/click/_termui_impl.py
+++ b/src/click/_termui_impl.py
@@ -549,11 +549,12 @@ def edit(self, text):
def open_url(url, wait=False, locate=False):
import subprocess
- def _unquote_file(url):
- import urllib
+ def _unquote_file(url: str) -> str:
+ from urllib.parse import unquote
if url.startswith("file://"):
- url = urllib.unquote(url[7:])
+ url = unquote(url[7:])
+
return url
if sys.platform == "darwin":
| urllib.unquote() no longer exists
In [_termui_impl.py](https://github.com/pallets/click/blob/972becff259e4ffcd220a6cad5096f36a89fdd6d/src/click/_termui_impl.py#L556), `urllib.unquote()` is called, but in Python 3 [urllib](https://docs.python.org/3/library/urllib.html) is a package and no longer exposes `unquote()` at the top level. Equivalent functionality is available in the `urllib.parse` module.
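
A minimal sketch of the Python 3 replacement, mirroring the `_unquote_file` helper in the patch above (the example URL is hypothetical):

```python
from urllib.parse import unquote  # Python 3 home of the old urllib.unquote


def _unquote_file(url: str) -> str:
    # Strip the file:// scheme and percent-decode the remaining path.
    if url.startswith("file://"):
        url = unquote(url[7:])
    return url


# Example (hypothetical path): _unquote_file("file:///tmp/some%20dir") -> "/tmp/some dir"
```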
|
google__turbinia-637 | [
{
"content": "# -*- coding: utf-8 -*-\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Client objects for Turbinia.\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom datetime import timedelta\n\nimport httplib2\nimport json\nimport logging\nfrom operator import itemgetter\nfrom operator import attrgetter\nimport os\nimport stat\nimport time\nimport subprocess\nimport codecs\n\nfrom google import auth\nfrom prometheus_client import start_http_server\nfrom turbinia import config\nfrom turbinia.config import logger\nfrom turbinia.config import DATETIME_FORMAT\nfrom turbinia import task_manager\nfrom turbinia import TurbiniaException\nfrom turbinia.lib import text_formatter as fmt\nfrom turbinia.lib import docker_manager\nfrom turbinia.jobs import manager as job_manager\nfrom turbinia.workers import Priority\nfrom turbinia.workers.artifact import FileArtifactExtractionTask\nfrom turbinia.workers.analysis.wordpress import WordpressAccessLogAnalysisTask\nfrom turbinia.workers.analysis.jenkins import JenkinsAnalysisTask\nfrom turbinia.workers.analysis.jupyter import JupyterAnalysisTask\nfrom turbinia.workers.finalize_request import FinalizeRequestTask\nfrom turbinia.workers.docker import DockerContainersEnumerationTask\nfrom turbinia.workers.grep import GrepTask\nfrom turbinia.workers.hadoop import HadoopAnalysisTask\nfrom turbinia.workers.hindsight import HindsightTask\nfrom turbinia.workers.partitions import PartitionEnumerationTask\nfrom turbinia.workers.plaso import PlasoTask\nfrom turbinia.workers.psort import PsortTask\nfrom turbinia.workers.redis import RedisAnalysisTask\nfrom turbinia.workers.sshd import SSHDAnalysisTask\nfrom turbinia.workers.strings import StringsAsciiTask\nfrom turbinia.workers.strings import StringsUnicodeTask\nfrom turbinia.workers.tomcat import TomcatAnalysisTask\nfrom turbinia.workers.volatility import VolatilityTask\nfrom turbinia.workers.worker_stat import StatTask\nfrom turbinia.workers.binary_extractor import BinaryExtractorTask\nfrom turbinia.workers.bulk_extractor import BulkExtractorTask\nfrom turbinia.workers.photorec import PhotorecTask\n\nMAX_RETRIES = 10\nRETRY_SLEEP = 60\n\n# TODO(aarontp): Remove this map after\n# https://github.com/google/turbinia/issues/278 is fixed.\nTASK_MAP = {\n 'fileartifactextractiontask': FileArtifactExtractionTask,\n 'wordpressaccessloganalysistask': WordpressAccessLogAnalysisTask,\n 'finalizerequesttask': FinalizeRequestTask,\n 'jenkinsanalysistask': JenkinsAnalysisTask,\n 'JupyterAnalysisTask': JupyterAnalysisTask,\n 'greptask': GrepTask,\n 'hadoopanalysistask': HadoopAnalysisTask,\n 'hindsighttask': HindsightTask,\n 'partitionenumerationtask': PartitionEnumerationTask,\n 'plasotask': PlasoTask,\n 'psorttask': PsortTask,\n 'redisanalysistask': RedisAnalysisTask,\n 'sshdanalysistask': SSHDAnalysisTask,\n 'stringsasciitask': StringsAsciiTask,\n 'stringsunicodetask': StringsUnicodeTask,\n 'tomcatanalysistask': 
TomcatAnalysisTask,\n 'volatilitytask': VolatilityTask,\n 'stattask': StatTask,\n 'binaryextractor': BinaryExtractorTask,\n 'bulkextractortask': BulkExtractorTask,\n 'dockertask': DockerContainersEnumerationTask,\n 'photorectask': PhotorecTask\n}\n\nconfig.LoadConfig()\nif config.TASK_MANAGER.lower() == 'psq':\n import psq\n\n from google.cloud import exceptions\n from google.cloud import datastore\n from google.cloud import pubsub\n\n from libcloudforensics.providers.gcp.internal import function as gcp_function\nelif config.TASK_MANAGER.lower() == 'celery':\n from turbinia.state_manager import RedisStateManager\n\nlog = logging.getLogger('turbinia')\nlogger.setup()\n\n\ndef get_turbinia_client(run_local=False):\n \"\"\"Return Turbinia client based on config.\n\n Returns:\n Initialized BaseTurbiniaClient or TurbiniaCeleryClient object.\n \"\"\"\n config.LoadConfig()\n # pylint: disable=no-else-return\n if config.TASK_MANAGER.lower() == 'psq':\n return BaseTurbiniaClient(run_local=run_local)\n elif config.TASK_MANAGER.lower() == 'celery':\n return TurbiniaCeleryClient(run_local=run_local)\n else:\n msg = 'Task Manager type \"{0:s}\" not implemented'.format(\n config.TASK_MANAGER)\n raise TurbiniaException(msg)\n\n\ndef check_docker_dependencies(dependencies):\n \"\"\"Checks docker dependencies.\n\n Args:\n dependencies(dict): dictionary of dependencies to check for.\n\n Raises:\n TurbiniaException: If dependency is not met.\n \"\"\"\n #TODO(wyassine): may run into issues down the line when a docker image\n # does not have bash or which installed. (no linux fs layer).\n log.info('Performing docker dependency check.')\n job_names = list(job_manager.JobsManager.GetJobNames())\n images = docker_manager.DockerManager().list_images(return_filter='short_id')\n\n # Iterate through list of jobs\n for job, values in dependencies.items():\n if job not in job_names:\n log.warning(\n 'The job {0:s} was not found or has been disabled. Skipping '\n 'dependency check...'.format(job))\n continue\n docker_image = values.get('docker_image')\n # short id only pulls the first 10 characters of image id.\n if docker_image and len(docker_image) > 10:\n docker_image = docker_image[0:10]\n\n if docker_image in images:\n for program in values['programs']:\n cmd = 'type {0:s}'.format(program)\n stdout, stderr, ret = docker_manager.ContainerManager(\n values['docker_image']).execute_container(cmd, shell=True)\n if ret != 0:\n raise TurbiniaException(\n 'Job dependency {0:s} not found for job {1:s}. Please install '\n 'the dependency for the container or disable the job.'.format(\n program, job))\n job_manager.JobsManager.RegisterDockerImage(job, values['docker_image'])\n elif docker_image:\n raise TurbiniaException(\n 'Docker image {0:s} was not found for the job {1:s}. Please '\n 'update the config with the correct image id'.format(\n values['docker_image'], job))\n\n\ndef check_system_dependencies(dependencies):\n \"\"\"Checks system dependencies.\n\n Args:\n dependencies(dict): dictionary of dependencies to check for.\n\n Raises:\n TurbiniaException: If dependency is not met.\n \"\"\"\n log.info('Performing system dependency check.')\n job_names = list(job_manager.JobsManager.GetJobNames())\n\n # Iterate through list of jobs\n for job, values in dependencies.items():\n if job not in job_names:\n log.warning(\n 'The job {0:s} was not found or has been disabled. 
Skipping '\n 'dependency check...'.format(job))\n continue\n elif not values.get('docker_image'):\n for program in values['programs']:\n cmd = 'type {0:s}'.format(program)\n proc = subprocess.Popen(cmd, shell=True)\n proc.communicate()\n ret = proc.returncode\n if ret != 0:\n raise TurbiniaException(\n 'Job dependency {0:s} not found in $PATH for the job {1:s}. '\n 'Please install the dependency or disable the job.'.format(\n program, job))\n\n\ndef check_directory(directory):\n \"\"\"Checks directory to make sure it exists and is writable.\n\n Args:\n directory (string): Path to directory\n\n Raises:\n TurbiniaException: When directory cannot be created or used.\n \"\"\"\n if os.path.exists(directory) and not os.path.isdir(directory):\n raise TurbiniaException(\n 'File {0:s} exists, but is not a directory'.format(directory))\n\n if not os.path.exists(directory):\n try:\n os.makedirs(directory)\n except OSError:\n raise TurbiniaException(\n 'Can not create Directory {0:s}'.format(directory))\n\n if not os.access(directory, os.W_OK):\n try:\n mode = os.stat(directory)[0]\n os.chmod(directory, mode | stat.S_IWUSR)\n except OSError:\n raise TurbiniaException(\n 'Can not add write permissions to {0:s}'.format(directory))\n\n\nclass TurbiniaStats(object):\n \"\"\"Statistics for Turbinia task execution.\n\n Attributes:\n count(int): The number of tasks\n min(datetime.timedelta): The minimum run time of all tasks\n max(datetime.timedelta): The maximum run time of all tasks\n mean(datetime.timedelta): The mean run time of all tasks\n tasks(list): A list of tasks to calculate stats for\n \"\"\"\n\n def __init__(self, description=None):\n self.description = description\n self.min = None\n self.mean = None\n self.max = None\n self.tasks = []\n\n def __str__(self):\n return self.format_stats()\n\n @property\n def count(self):\n \"\"\"Gets a count of the tasks in this stats object.\n\n Returns:\n Int of task count.\n \"\"\"\n return len(self.tasks)\n\n def add_task(self, task):\n \"\"\"Add a task result dict.\n\n Args:\n task(dict): The task results we want to count stats for.\n \"\"\"\n self.tasks.append(task)\n\n def calculate_stats(self):\n \"\"\"Calculates statistics of the current tasks.\"\"\"\n if not self.tasks:\n return\n\n sorted_tasks = sorted(self.tasks, key=itemgetter('run_time'))\n self.min = sorted_tasks[0]['run_time']\n self.max = sorted_tasks[len(sorted_tasks) - 1]['run_time']\n self.mean = sorted_tasks[len(sorted_tasks) // 2]['run_time']\n\n # Remove the microseconds to keep things cleaner\n self.min = self.min - timedelta(microseconds=self.min.microseconds)\n self.max = self.max - timedelta(microseconds=self.max.microseconds)\n self.mean = self.mean - timedelta(microseconds=self.mean.microseconds)\n\n def format_stats(self):\n \"\"\"Formats statistics data.\n\n Returns:\n String of statistics data\n \"\"\"\n return '{0:s}: Count: {1:d}, Min: {2!s}, Mean: {3!s}, Max: {4!s}'.format(\n self.description, self.count, self.min, self.mean, self.max)\n\n def format_stats_csv(self):\n \"\"\"Formats statistics data into CSV output.\n\n Returns:\n String of statistics data in CSV format\n \"\"\"\n return '{0:s}, {1:d}, {2!s}, {3!s}, {4!s}'.format(\n self.description, self.count, self.min, self.mean, self.max)\n\n\nclass BaseTurbiniaClient(object):\n \"\"\"Client class for Turbinia.\n\n Attributes:\n task_manager (TaskManager): Turbinia task manager\n \"\"\"\n\n def __init__(self, run_local=False):\n config.LoadConfig()\n if run_local:\n self.task_manager = None\n else:\n self.task_manager = 
task_manager.get_task_manager()\n self.task_manager.setup(server=False)\n\n def create_task(self, task_name):\n \"\"\"Creates a Turbinia Task by name.\n\n Args:\n task_name(string): Name of the Task we are going to run.\n\n Returns:\n TurbiniaTask: An instantiated Task object.\n\n Raises:\n TurbiniaException: When no Task object matching task_name is found.\n \"\"\"\n task_obj = TASK_MAP.get(task_name.lower())\n log.debug('Looking up Task {0:s} by name'.format(task_name))\n if not task_obj:\n raise TurbiniaException('No Task named {0:s} found'.format(task_name))\n return task_obj()\n\n def list_jobs(self):\n \"\"\"List the available jobs.\"\"\"\n # TODO(aarontp): Refactor this out so that we don't need to depend on\n # the task manager from the client.\n log.info('Available Jobs:')\n for job in self.task_manager.jobs:\n log.info('\\t{0:s}'.format(job.NAME))\n\n def wait_for_request(\n self, instance, project, region, request_id=None, user=None,\n poll_interval=60):\n \"\"\"Polls and waits for Turbinia Request to complete.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the region to execute in.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n poll_interval (int): Interval of seconds between polling cycles.\n \"\"\"\n last_completed_count = -1\n last_uncompleted_count = -1\n while True:\n task_results = self.get_task_data(\n instance, project, region, request_id=request_id, user=user)\n completed_tasks = []\n uncompleted_tasks = []\n for task in task_results:\n if task.get('successful') is not None:\n completed_tasks.append(task)\n else:\n uncompleted_tasks.append(task)\n\n if completed_tasks and len(completed_tasks) == len(task_results):\n break\n\n completed_names = [t.get('name') for t in completed_tasks]\n completed_names = ', '.join(sorted(completed_names))\n uncompleted_names = [t.get('name') for t in uncompleted_tasks]\n uncompleted_names = ', '.join(sorted(uncompleted_names))\n total_count = len(completed_tasks) + len(uncompleted_tasks)\n msg = (\n 'Tasks completed ({0:d}/{1:d}): [{2:s}], waiting for [{3:s}].'.format(\n len(completed_tasks), total_count, completed_names,\n uncompleted_names))\n if (len(completed_tasks) > last_completed_count or\n len(uncompleted_tasks) > last_uncompleted_count):\n log.info(msg)\n else:\n log.debug(msg)\n\n last_completed_count = len(completed_tasks)\n last_uncompleted_count = len(uncompleted_tasks)\n time.sleep(poll_interval)\n\n log.info('All {0:d} Tasks completed'.format(len(task_results)))\n\n def get_task_data(\n self, instance, project, region, days=0, task_id=None, request_id=None,\n user=None, function_name='gettasks', output_json=False):\n \"\"\"Gets task data from Google Cloud Functions.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the region to execute in.\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n function_name (string): The GCF function we want to call.\n output_json (bool): Whether to return JSON output.\n\n Returns:\n (List|JSON string) of Task dict objects\n \"\"\"\n cloud_function = 
gcp_function.GoogleCloudFunction(project)\n func_args = {'instance': instance, 'kind': 'TurbiniaTask'}\n\n if days:\n start_time = datetime.now() - timedelta(days=days)\n # Format this like '1990-01-01T00:00:00z' so we can cast it directly to a\n # javascript Date() object in the cloud function.\n start_string = start_time.strftime(DATETIME_FORMAT)\n func_args.update({'start_time': start_string})\n elif task_id:\n func_args.update({'task_id': task_id})\n elif request_id:\n func_args.update({'request_id': request_id})\n\n if user:\n func_args.update({'user': user})\n\n response = None\n retry_count = 0\n credential_error_count = 0\n while response is None and retry_count < MAX_RETRIES:\n try:\n response = cloud_function.ExecuteFunction(\n function_name, region, func_args)\n except auth.exceptions.RefreshError as exception:\n if credential_error_count == 0:\n log.info(\n 'GCP Credentials need to be refreshed, please refresh in another '\n 'terminal and this process will resume. Error: {0!s}'.format(\n exception))\n else:\n log.debug(\n 'GCP Credentials need to be refreshed, please refresh in another '\n 'terminal and this process will resume. Attempt {0:d}. Error: '\n '{1!s}'.format(credential_error_count + 1, exception))\n # Note, we are intentially not incrementing the retry_count here because\n # we will retry indefinitely while we wait for the user to reauth.\n credential_error_count += 1\n except httplib2.ServerNotFoundError as exception:\n log.info(\n 'Error connecting to server, will retry [{0:d} of {1:d} retries]: '\n '{2!s}'.format(retry_count, MAX_RETRIES, exception))\n retry_count += 1\n\n if response is None:\n time.sleep(RETRY_SLEEP)\n\n if 'result' not in response:\n log.error('No results found')\n if response.get('error', '{}') != '{}':\n msg = 'Error executing Cloud Function: [{0!s}].'.format(\n response.get('error'))\n log.error(msg)\n log.debug('GCF response: {0!s}'.format(response))\n raise TurbiniaException(\n 'Cloud Function {0:s} returned no results.'.format(function_name))\n\n try:\n results = json.loads(response['result'])\n except (TypeError, ValueError) as e:\n raise TurbiniaException(\n 'Could not deserialize result [{0!s}] from GCF: [{1!s}]'.format(\n response.get('result'), e))\n\n task_data = results[0]\n if output_json:\n try:\n json_data = json.dumps(task_data)\n except (TypeError, ValueError) as e:\n raise TurbiniaException(\n 'Could not re-serialize result [{0!s}] from GCF: [{1!s}]'.format(\n str(task_data), e))\n return json_data\n\n # Convert run_time/last_update back into datetime objects\n for task in task_data:\n if task.get('run_time'):\n task['run_time'] = timedelta(seconds=task['run_time'])\n if task.get('last_update'):\n task['last_update'] = datetime.strptime(\n task['last_update'], DATETIME_FORMAT)\n\n return task_data\n\n def format_task_detail(self, task, show_files=False):\n \"\"\"Formats a single task in detail.\n\n Args:\n task (dict): The task to format data for\n show_files (bool): Whether we want to print out log file paths\n\n Returns:\n list: Formatted task data\n \"\"\"\n report = []\n saved_paths = task.get('saved_paths') or []\n status = task.get('status') or 'No task status'\n\n report.append(fmt.heading2(task.get('name')))\n line = '{0:s} {1:s}'.format(fmt.bold('Status:'), status)\n report.append(fmt.bullet(line))\n report.append(fmt.bullet('Task Id: {0:s}'.format(task.get('id'))))\n report.append(\n fmt.bullet('Executed on worker {0:s}'.format(task.get('worker_name'))))\n if task.get('report_data'):\n report.append('')\n 
report.append(fmt.heading3('Task Reported Data'))\n report.extend(task.get('report_data').splitlines())\n if show_files:\n report.append('')\n report.append(fmt.heading3('Saved Task Files:'))\n for path in saved_paths:\n report.append(fmt.bullet(fmt.code(path)))\n report.append('')\n return report\n\n def format_worker_task(self, task):\n \"\"\"Formats a single task for Worker view.\n\n Args:\n task (dict): The task to format data for\n Returns:\n list: Formatted task data\n \"\"\"\n report = []\n report.append(\n fmt.bullet('{0:s} - {1:s}'.format(task['task_id'], task['task_name'])))\n report.append(\n fmt.bullet(\n 'Last Update: {0:s}'.format(\n task['last_update'].strftime(DATETIME_FORMAT)), level=2))\n report.append(fmt.bullet('Status: {0:s}'.format(task['status']), level=2))\n report.append(\n fmt.bullet('Run Time: {0:s}'.format(str(task['run_time'])), level=2))\n report.append('')\n return report\n\n def format_task(self, task, show_files=False):\n \"\"\"Formats a single task in short form.\n\n Args:\n task (dict): The task to format data for\n show_files (bool): Whether we want to print out log file paths\n\n Returns:\n list: Formatted task data\n \"\"\"\n report = []\n saved_paths = task.get('saved_paths') or []\n status = task.get('status') or 'No task status'\n report.append(fmt.bullet('{0:s}: {1:s}'.format(task.get('name'), status)))\n if show_files:\n for path in saved_paths:\n report.append(fmt.bullet(fmt.code(path), level=2))\n report.append('')\n return report\n\n def get_task_statistics(\n self, instance, project, region, days=0, task_id=None, request_id=None,\n user=None):\n \"\"\"Gathers statistics for Turbinia execution data.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n\n Returns:\n task_stats(dict): Mapping of statistic names to values\n \"\"\"\n task_results = self.get_task_data(\n instance, project, region, days, task_id, request_id, user)\n if not task_results:\n return {}\n\n task_stats = {\n 'all_tasks': TurbiniaStats('All Tasks'),\n 'successful_tasks': TurbiniaStats('Successful Tasks'),\n 'failed_tasks': TurbiniaStats('Failed Tasks'),\n 'requests': TurbiniaStats('Total Request Time'),\n # The following are dicts mapping the user/worker/type names to their\n # respective TurbiniaStats() objects.\n # Total wall-time for all tasks of a given type\n 'tasks_per_type': {},\n # Total wall-time for all tasks per Worker\n 'tasks_per_worker': {},\n # Total wall-time for all tasks per User\n 'tasks_per_user': {},\n }\n\n # map of request ids to [min time, max time]\n requests = {}\n\n for task in task_results:\n request_id = task.get('request_id')\n task_type = task.get('name')\n worker = task.get('worker_name')\n user = task.get('requester')\n if not task.get('run_time'):\n log.debug(\n 'Ignoring task {0:s} in statistics because the run_time is not '\n 'set, and it is required to calculate stats'.format(\n task.get('name')))\n continue\n\n # Stats for all/successful/failed tasks\n task_stats['all_tasks'].add_task(task)\n if task.get('successful') is True:\n task_stats['successful_tasks'].add_task(task)\n elif task.get('successful') is False:\n task_stats['failed_tasks'].add_task(task)\n\n # 
Stats for Tasks per Task type.\n if task_type in task_stats['tasks_per_type']:\n task_type_stats = task_stats['tasks_per_type'].get(task_type)\n else:\n task_type_stats = TurbiniaStats('Task type {0:s}'.format(task_type))\n task_stats['tasks_per_type'][task_type] = task_type_stats\n task_type_stats.add_task(task)\n\n # Stats per worker.\n if worker in task_stats['tasks_per_worker']:\n worker_stats = task_stats['tasks_per_worker'].get(worker)\n else:\n worker_stats = TurbiniaStats('Worker {0:s}'.format(worker))\n task_stats['tasks_per_worker'][worker] = worker_stats\n worker_stats.add_task(task)\n\n # Stats per submitting User.\n if user in task_stats['tasks_per_user']:\n user_stats = task_stats['tasks_per_user'].get(user)\n else:\n user_stats = TurbiniaStats('User {0:s}'.format(user))\n task_stats['tasks_per_user'][user] = user_stats\n user_stats.add_task(task)\n\n # Stats for the total request. This will, for each request, calculate the\n # start time of the earliest task and the stop time of the latest task.\n # This will give the overall run time covering all tasks in the request.\n task_start_time = task['last_update'] - task['run_time']\n task_stop_time = task['last_update']\n if request_id in requests:\n start_time, stop_time = requests[request_id]\n if task_start_time < start_time:\n requests[request_id][0] = task_start_time\n if task_stop_time > stop_time:\n requests[request_id][1] = task_stop_time\n else:\n requests[request_id] = [task_start_time, task_stop_time]\n\n # Add a fake task result for each request with our calculated times to the\n # stats module\n for min_time, max_time in requests.values():\n task = {}\n task['run_time'] = max_time - min_time\n task_stats['requests'].add_task(task)\n\n # Go over all stat objects and calculate them\n for stat_obj in task_stats.values():\n if isinstance(stat_obj, dict):\n for inner_stat_obj in stat_obj.values():\n inner_stat_obj.calculate_stats()\n else:\n stat_obj.calculate_stats()\n\n return task_stats\n\n def format_task_statistics(\n self, instance, project, region, days=0, task_id=None, request_id=None,\n user=None, csv=False):\n \"\"\"Formats statistics for Turbinia execution data.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n csv (bool): Whether we want the output in CSV format.\n\n Returns:\n String of task statistics report\n \"\"\"\n task_stats = self.get_task_statistics(\n instance, project, region, days, task_id, request_id, user)\n if not task_stats:\n return 'No tasks found'\n\n stats_order = [\n 'all_tasks', 'successful_tasks', 'failed_tasks', 'requests',\n 'tasks_per_type', 'tasks_per_worker', 'tasks_per_user'\n ]\n\n if csv:\n report = ['stat_type, count, min, mean, max']\n else:\n report = ['Execution time statistics for Turbinia:', '']\n for stat_name in stats_order:\n stat_obj = task_stats[stat_name]\n if isinstance(stat_obj, dict):\n # Sort by description so that we get consistent report output\n inner_stat_objs = sorted(\n stat_obj.values(), key=attrgetter('description'))\n for inner_stat_obj in inner_stat_objs:\n if csv:\n report.append(inner_stat_obj.format_stats_csv())\n else:\n report.append(inner_stat_obj.format_stats())\n 
else:\n if csv:\n report.append(stat_obj.format_stats_csv())\n else:\n report.append(stat_obj.format_stats())\n\n report.append('')\n return '\\n'.join(report)\n\n def format_worker_status(\n self, instance, project, region, days=0, all_fields=False):\n \"\"\"Formats the recent history for Turbinia Workers.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n days (int): The number of days we want history for.\n all_fields (bool): Include historical Task information for the worker.\n Returns:\n String of Request status\n \"\"\"\n # Set number of days to retrieve data\n num_days = 7\n if days != 0:\n num_days = days\n task_results = self.get_task_data(instance, project, region, days=num_days)\n if not task_results:\n return ''\n\n # Sort task_results by last updated timestamp.\n task_results = sorted(\n task_results, key=itemgetter('last_update'), reverse=True)\n\n # Create dictionary of worker_node: {{task_id, task_update,\n # task_name, task_status}}\n workers_dict = {}\n scheduled_counter = 0\n for result in task_results:\n worker_node = result.get('worker_name')\n status = result.get('status')\n status = status if status else 'No task status'\n if worker_node and worker_node not in workers_dict:\n workers_dict[worker_node] = []\n if worker_node:\n task_dict = {}\n task_dict['task_id'] = result.get('id')\n task_dict['last_update'] = result.get('last_update')\n task_dict['task_name'] = result.get('name')\n task_dict['status'] = status\n # Check status for anything that is running.\n if 'running' in status:\n run_time = (datetime.now() -\n result.get('last_update')).total_seconds()\n run_time = timedelta(seconds=run_time)\n task_dict['run_time'] = run_time\n else:\n run_time = result.get('run_time')\n task_dict['run_time'] = run_time if run_time else 'No run time.'\n workers_dict[worker_node].append(task_dict)\n else:\n # Track scheduled/unassigned Tasks for reporting.\n scheduled_counter += 1\n\n # Generate report header\n report = []\n report.append(\n fmt.heading1(\n 'Turbinia report for Worker activity within {0:d} days'.format(\n num_days)))\n report.append(\n fmt.bullet('{0:d} Worker(s) found.'.format(len(workers_dict.keys()))))\n report.append(\n fmt.bullet(\n '{0:d} Task(s) unassigned or scheduled and pending Worker assignment.'\n .format(scheduled_counter)))\n for worker_node, tasks in workers_dict.items():\n report.append('')\n report.append(fmt.heading2('Worker Node: {0:s}'.format(worker_node)))\n # Append the statuses chronologically\n run_status, queued_status, other_status = [], [], []\n for task in tasks:\n if 'running' in task['status']:\n run_status.extend(self.format_worker_task(task))\n elif 'queued' in task['status']:\n queued_status.extend(self.format_worker_task(task))\n else:\n other_status.extend(self.format_worker_task(task))\n # Add each of the status lists back to report list\n not_found = [fmt.bullet('No Tasks found.')]\n report.append(fmt.heading3('Running Tasks'))\n report.extend(run_status if run_status else not_found)\n report.append('')\n report.append(fmt.heading3('Queued Tasks'))\n report.extend(queued_status if queued_status else not_found)\n # Add Historical Tasks\n if all_fields:\n report.append('')\n report.append(fmt.heading3('Finished Tasks'))\n report.extend(other_status if other_status else not_found)\n return '\\n'.join(report)\n\n def format_request_status(\n self, instance, project, 
region, days=0, all_fields=False):\n \"\"\"Formats the recent history for Turbinia Requests.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n days (int): The number of days we want history for.\n all_fields (bool): Include all fields for the Request, which includes,\n saved file paths.\n Returns:\n String of Request status\n \"\"\"\n # Set number of days to retrieve data\n num_days = 7\n if days != 0:\n num_days = days\n task_results = self.get_task_data(instance, project, region, days=num_days)\n if not task_results:\n return ''\n\n # Sort task_results by last updated timestamp.\n task_results = sorted(\n task_results, key=itemgetter('last_update'), reverse=True)\n\n # Create dictionary of request_id: {saved_paths, last_update, requester,\n # task_id}\n request_dict = {}\n for result in task_results:\n request_id = result.get('request_id')\n saved_paths = result.get('saved_paths')\n if request_id not in request_dict:\n saved_paths = set(saved_paths) if saved_paths else set()\n request_dict[request_id] = {}\n request_dict[request_id]['saved_paths'] = saved_paths\n request_dict[request_id]['last_update'] = result.get('last_update')\n request_dict[request_id]['requester'] = result.get('requester')\n request_dict[request_id]['task_id'] = set([result.get('id')])\n else:\n if saved_paths:\n request_dict[request_id]['saved_paths'].update(saved_paths)\n request_dict[request_id]['task_id'].update([result.get('id')])\n\n # Generate report header\n report = []\n report.append(\n fmt.heading1(\n 'Turbinia report for Requests made within {0:d} days'.format(\n num_days)))\n report.append(\n fmt.bullet(\n '{0:d} requests were made within this timeframe.'.format(\n len(request_dict.keys()))))\n # Print report data for Requests\n for request_id, values in request_dict.items():\n report.append('')\n report.append(fmt.heading2('Request ID: {0:s}'.format(request_id)))\n report.append(\n fmt.bullet(\n 'Last Update: {0:s}'.format(\n values['last_update'].strftime(DATETIME_FORMAT))))\n report.append(fmt.bullet('Requester: {0:s}'.format(values['requester'])))\n report.append(\n fmt.bullet('Task Count: {0:d}'.format(len(values['task_id']))))\n if all_fields:\n report.append(fmt.bullet('Associated Evidence:'))\n # Append all saved paths in request\n for path in sorted(values['saved_paths']):\n report.append(fmt.bullet(fmt.code(path), level=2))\n report.append('')\n return '\\n'.join(report)\n\n def format_task_status(\n self, instance, project, region, days=0, task_id=None, request_id=None,\n user=None, all_fields=False, full_report=False,\n priority_filter=Priority.HIGH, output_json=False):\n \"\"\"Formats the recent history for Turbinia Tasks.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n all_fields (bool): Include all fields for the task, including task,\n request ids and saved file paths.\n full_report (bool): Generate a full markdown report instead of just a\n summary.\n priority_filter (int): Output only a summary for Tasks with a value\n greater than the 
priority_filter.\n output_json (bool): Whether to return JSON output.\n\n Returns:\n String of task status in JSON or human readable format.\n \"\"\"\n if user and days == 0:\n days = 1000\n task_results = self.get_task_data(\n instance, project, region, days, task_id, request_id, user,\n output_json=output_json)\n if not task_results:\n return ''\n\n if output_json:\n return task_results\n\n # Sort all tasks by the report_priority so that tasks with a higher\n # priority are listed first in the report.\n for result in task_results:\n # 0 is a valid value, so checking against specific values\n if result.get('report_priority') in (None, ''):\n result['report_priority'] = Priority.LOW\n task_results = sorted(task_results, key=itemgetter('report_priority'))\n num_results = len(task_results)\n if not num_results:\n msg = 'No Turbinia Tasks found.'\n log.info(msg)\n return '\\n{0:s}'.format(msg)\n\n # Build up data\n report = []\n requester = task_results[0].get('requester')\n request_id = task_results[0].get('request_id')\n success_types = ['Successful', 'Failed', 'Scheduled or Running']\n success_values = [True, False, None]\n # Reverse mapping values to types\n success_map = dict(zip(success_values, success_types))\n task_map = defaultdict(list)\n success_types.insert(0, 'High Priority')\n for task in task_results:\n if task.get('report_priority') <= priority_filter:\n task_map['High Priority'].append(task)\n else:\n task_map[success_map[task.get('successful')]].append(task)\n\n # Generate report header\n report.append('\\n')\n report.append(fmt.heading1('Turbinia report {0:s}'.format(request_id)))\n report.append(\n fmt.bullet(\n 'Processed {0:d} Tasks for user {1:s}'.format(\n num_results, requester)))\n\n # Print report data for tasks\n for success_type in success_types:\n report.append('')\n report.append(fmt.heading1('{0:s} Tasks'.format(success_type)))\n if not task_map[success_type]:\n report.append(fmt.bullet('None'))\n for task in task_map[success_type]:\n if full_report and success_type == success_types[0]:\n report.extend(self.format_task_detail(task, show_files=all_fields))\n else:\n report.extend(self.format_task(task, show_files=all_fields))\n\n return '\\n'.join(report)\n\n def run_local_task(self, task_name, request):\n \"\"\"Runs a Turbinia Task locally.\n\n Args:\n task_name(string): Name of the Task we are going to run.\n request (TurbiniaRequest): Object containing request and evidence info.\n\n Returns:\n TurbiniaTaskResult: The result returned by the Task Execution.\n \"\"\"\n task = self.create_task(task_name)\n task.request_id = request.request_id\n task.base_output_dir = config.OUTPUT_DIR\n task.run_local = True\n if not request.evidence:\n raise TurbiniaException('TurbiniaRequest does not contain evidence.')\n log.info('Running Task {0:s} locally'.format(task_name))\n result = task.run_wrapper(request.evidence[0])\n return result\n\n def send_request(self, request):\n \"\"\"Sends a TurbiniaRequest message.\n\n Args:\n request: A TurbiniaRequest object.\n \"\"\"\n self.task_manager.server_pubsub.send_request(request)\n\n def close_tasks(\n self, instance, project, region, request_id=None, task_id=None, user=None,\n requester=None):\n \"\"\"Close Turbinia Tasks based on Request ID.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n request_id (string): The Id of the request we want tasks for.\n task_id 
(string): The Id of the request we want task for.\n user (string): The user of the request we want tasks for.\n requester (string): The user making the request to close tasks.\n\n Returns: String of closed Task IDs.\n \"\"\"\n cloud_function = gcp_function.GoogleCloudFunction(project)\n func_args = {\n 'instance': instance,\n 'kind': 'TurbiniaTask',\n 'request_id': request_id,\n 'task_id': task_id,\n 'user': user,\n 'requester': requester\n }\n response = cloud_function.ExecuteFunction('closetasks', region, func_args)\n return 'Closed Task IDs: %s' % response.get('result')\n\n\nclass TurbiniaCeleryClient(BaseTurbiniaClient):\n \"\"\"Client class for Turbinia (Celery).\n\n Overriding some things specific to Celery operation.\n\n Attributes:\n redis (RedisStateManager): Redis datastore object\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(TurbiniaCeleryClient, self).__init__(*args, **kwargs)\n self.redis = RedisStateManager()\n\n def send_request(self, request):\n \"\"\"Sends a TurbiniaRequest message.\n\n Args:\n request: A TurbiniaRequest object.\n \"\"\"\n self.task_manager.kombu.send_request(request)\n\n # pylint: disable=arguments-differ\n def get_task_data(\n self, instance, _, __, days=0, task_id=None, request_id=None,\n function_name=None, output_json=False):\n \"\"\"Gets task data from Redis.\n\n We keep the same function signature, but ignore arguments passed for GCP.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n\n Returns:\n List of Task dict objects.\n \"\"\"\n return self.redis.get_task_data(instance, days, task_id, request_id)\n\n\nclass TurbiniaServer(object):\n \"\"\"Turbinia Server class.\n\n Attributes:\n task_manager (TaskManager): An object to manage turbinia tasks.\n \"\"\"\n\n def __init__(self, jobs_denylist=None, jobs_allowlist=None):\n \"\"\"Initializes Turbinia Server.\n\n Args:\n jobs_denylist (Optional[list[str]]): Jobs we will exclude from running\n jobs_allowlist (Optional[list[str]]): The only Jobs we will include to run\n \"\"\"\n config.LoadConfig()\n self.task_manager = task_manager.get_task_manager()\n self.task_manager.setup(jobs_denylist, jobs_allowlist)\n\n def start(self):\n \"\"\"Start Turbinia Server.\"\"\"\n log.info('Starting Prometheus endpoint.')\n start_http_server(port=config.PROMETHEUS_PORT, addr=config.PROMETHEUS_ADDR)\n log.info('Running Turbinia Server.')\n self.task_manager.run()\n\n def add_evidence(self, evidence_):\n \"\"\"Add evidence to be processed.\"\"\"\n self.task_manager.add_evidence(evidence_)\n\n\nclass TurbiniaCeleryWorker(BaseTurbiniaClient):\n \"\"\"Turbinia Celery Worker class.\n\n Attributes:\n worker (celery.app): Celery worker app\n \"\"\"\n\n def __init__(self, jobs_denylist=None, jobs_allowlist=None):\n \"\"\"Initialization for celery worker.\n\n Args:\n jobs_denylist (Optional[list[str]]): Jobs we will exclude from running\n jobs_allowlist (Optional[list[str]]): The only Jobs we will include to run\n \"\"\"\n super(TurbiniaCeleryWorker, self).__init__()\n # Deregister jobs from denylist/allowlist.\n job_manager.JobsManager.DeregisterJobs(jobs_denylist, jobs_allowlist)\n disabled_jobs = list(config.DISABLED_JOBS) if config.DISABLED_JOBS else []\n disabled_jobs = [j.lower() for j in disabled_jobs]\n # Only actually disable jobs that have not been allowlisted.\n if jobs_allowlist:\n 
disabled_jobs = list(set(disabled_jobs) - set(jobs_allowlist))\n if disabled_jobs:\n log.info(\n 'Disabling non-allowlisted jobs configured to be disabled in the '\n 'config file: {0:s}'.format(', '.join(disabled_jobs)))\n job_manager.JobsManager.DeregisterJobs(jobs_denylist=disabled_jobs)\n\n # Check for valid dependencies/directories.\n dependencies = config.ParseDependencies()\n if config.DOCKER_ENABLED:\n check_docker_dependencies(dependencies)\n check_system_dependencies(dependencies)\n check_directory(config.MOUNT_DIR_PREFIX)\n check_directory(config.OUTPUT_DIR)\n check_directory(config.TMP_DIR)\n\n jobs = job_manager.JobsManager.GetJobNames()\n log.info(\n 'Dependency check complete. The following jobs will be enabled '\n 'for this worker: {0:s}'.format(','.join(jobs)))\n self.worker = self.task_manager.celery.app\n\n def start(self):\n \"\"\"Start Turbinia Celery Worker.\"\"\"\n log.info('Running Turbinia Celery Worker.')\n self.worker.task(task_manager.task_runner, name='task_runner')\n argv = ['celery', 'worker', '--loglevel=info', '--pool=solo']\n self.worker.start(argv)\n\n\nclass TurbiniaPsqWorker(object):\n \"\"\"Turbinia PSQ Worker class.\n\n Attributes:\n worker (psq.Worker): PSQ Worker object\n psq (psq.Queue): A Task queue object\n\n Raises:\n TurbiniaException: When errors occur\n \"\"\"\n\n def __init__(self, jobs_denylist=None, jobs_allowlist=None):\n \"\"\"Initialization for PSQ Worker.\n\n Args:\n jobs_denylist (Optional[list[str]]): Jobs we will exclude from running\n jobs_allowlist (Optional[list[str]]): The only Jobs we will include to run\n \"\"\"\n config.LoadConfig()\n psq_publisher = pubsub.PublisherClient()\n psq_subscriber = pubsub.SubscriberClient()\n datastore_client = datastore.Client(project=config.TURBINIA_PROJECT)\n try:\n self.psq = psq.Queue(\n psq_publisher, psq_subscriber, config.TURBINIA_PROJECT,\n name=config.PSQ_TOPIC, storage=psq.DatastoreStorage(datastore_client))\n except exceptions.GoogleCloudError as e:\n msg = 'Error creating PSQ Queue: {0:s}'.format(str(e))\n log.error(msg)\n raise TurbiniaException(msg)\n\n # Deregister jobs from denylist/allowlist.\n job_manager.JobsManager.DeregisterJobs(jobs_denylist, jobs_allowlist)\n disabled_jobs = list(config.DISABLED_JOBS) if config.DISABLED_JOBS else []\n disabled_jobs = [j.lower() for j in disabled_jobs]\n # Only actually disable jobs that have not been allowlisted.\n if jobs_allowlist:\n disabled_jobs = list(set(disabled_jobs) - set(jobs_allowlist))\n if disabled_jobs:\n log.info(\n 'Disabling non-allowlisted jobs configured to be disabled in the '\n 'config file: {0:s}'.format(', '.join(disabled_jobs)))\n job_manager.JobsManager.DeregisterJobs(jobs_denylist=disabled_jobs)\n\n # Check for valid dependencies/directories.\n dependencies = config.ParseDependencies()\n if config.DOCKER_ENABLED:\n check_docker_dependencies(dependencies)\n check_system_dependencies(dependencies)\n check_directory(config.MOUNT_DIR_PREFIX)\n check_directory(config.OUTPUT_DIR)\n check_directory(config.TMP_DIR)\n\n jobs = job_manager.JobsManager.GetJobNames()\n log.info(\n 'Dependency check complete. 
The following jobs are enabled '\n 'for this worker: {0:s}'.format(','.join(jobs)))\n log.info('Starting PSQ listener on queue {0:s}'.format(self.psq.name))\n self.worker = psq.Worker(queue=self.psq)\n\n def start(self):\n \"\"\"Start Turbinia PSQ Worker.\"\"\"\n log.info('Starting Prometheus endpoint.')\n start_http_server(port=config.PROMETHEUS_PORT, addr=config.PROMETHEUS_ADDR)\n log.info('Running Turbinia PSQ Worker.')\n self.worker.listen()\n",
"path": "turbinia/client.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Client objects for Turbinia.\"\"\"\n\nfrom __future__ import unicode_literals\n\nfrom collections import defaultdict\nfrom datetime import datetime\nfrom datetime import timedelta\n\nimport httplib2\nimport json\nimport logging\nfrom operator import itemgetter\nfrom operator import attrgetter\nimport os\nimport stat\nimport time\nimport subprocess\nimport codecs\n\nfrom google import auth\nfrom prometheus_client import start_http_server\nfrom turbinia import config\nfrom turbinia.config import logger\nfrom turbinia.config import DATETIME_FORMAT\nfrom turbinia import task_manager\nfrom turbinia import TurbiniaException\nfrom turbinia.lib import text_formatter as fmt\nfrom turbinia.lib import docker_manager\nfrom turbinia.jobs import manager as job_manager\nfrom turbinia.workers import Priority\nfrom turbinia.workers.artifact import FileArtifactExtractionTask\nfrom turbinia.workers.analysis.wordpress import WordpressAccessLogAnalysisTask\nfrom turbinia.workers.analysis.jenkins import JenkinsAnalysisTask\nfrom turbinia.workers.analysis.jupyter import JupyterAnalysisTask\nfrom turbinia.workers.finalize_request import FinalizeRequestTask\nfrom turbinia.workers.docker import DockerContainersEnumerationTask\nfrom turbinia.workers.grep import GrepTask\nfrom turbinia.workers.hadoop import HadoopAnalysisTask\nfrom turbinia.workers.hindsight import HindsightTask\nfrom turbinia.workers.partitions import PartitionEnumerationTask\nfrom turbinia.workers.plaso import PlasoTask\nfrom turbinia.workers.psort import PsortTask\nfrom turbinia.workers.redis import RedisAnalysisTask\nfrom turbinia.workers.sshd import SSHDAnalysisTask\nfrom turbinia.workers.strings import StringsAsciiTask\nfrom turbinia.workers.strings import StringsUnicodeTask\nfrom turbinia.workers.tomcat import TomcatAnalysisTask\nfrom turbinia.workers.volatility import VolatilityTask\nfrom turbinia.workers.worker_stat import StatTask\nfrom turbinia.workers.binary_extractor import BinaryExtractorTask\nfrom turbinia.workers.bulk_extractor import BulkExtractorTask\nfrom turbinia.workers.photorec import PhotorecTask\n\nMAX_RETRIES = 10\nRETRY_SLEEP = 60\n\n# TODO(aarontp): Remove this map after\n# https://github.com/google/turbinia/issues/278 is fixed.\nTASK_MAP = {\n 'fileartifactextractiontask': FileArtifactExtractionTask,\n 'wordpressaccessloganalysistask': WordpressAccessLogAnalysisTask,\n 'finalizerequesttask': FinalizeRequestTask,\n 'jenkinsanalysistask': JenkinsAnalysisTask,\n 'JupyterAnalysisTask': JupyterAnalysisTask,\n 'greptask': GrepTask,\n 'hadoopanalysistask': HadoopAnalysisTask,\n 'hindsighttask': HindsightTask,\n 'partitionenumerationtask': PartitionEnumerationTask,\n 'plasotask': PlasoTask,\n 'psorttask': PsortTask,\n 'redisanalysistask': RedisAnalysisTask,\n 'sshdanalysistask': SSHDAnalysisTask,\n 'stringsasciitask': StringsAsciiTask,\n 'stringsunicodetask': StringsUnicodeTask,\n 'tomcatanalysistask': 
TomcatAnalysisTask,\n 'volatilitytask': VolatilityTask,\n 'stattask': StatTask,\n 'binaryextractor': BinaryExtractorTask,\n 'bulkextractortask': BulkExtractorTask,\n 'dockertask': DockerContainersEnumerationTask,\n 'photorectask': PhotorecTask\n}\n\nconfig.LoadConfig()\nif config.TASK_MANAGER.lower() == 'psq':\n import psq\n\n from google.cloud import exceptions\n from google.cloud import datastore\n from google.cloud import pubsub\n\n from libcloudforensics.providers.gcp.internal import function as gcp_function\nelif config.TASK_MANAGER.lower() == 'celery':\n from turbinia.state_manager import RedisStateManager\n\nlog = logging.getLogger('turbinia')\nlogger.setup()\n\n\ndef get_turbinia_client(run_local=False):\n \"\"\"Return Turbinia client based on config.\n\n Returns:\n Initialized BaseTurbiniaClient or TurbiniaCeleryClient object.\n \"\"\"\n config.LoadConfig()\n # pylint: disable=no-else-return\n if config.TASK_MANAGER.lower() == 'psq':\n return BaseTurbiniaClient(run_local=run_local)\n elif config.TASK_MANAGER.lower() == 'celery':\n return TurbiniaCeleryClient(run_local=run_local)\n else:\n msg = 'Task Manager type \"{0:s}\" not implemented'.format(\n config.TASK_MANAGER)\n raise TurbiniaException(msg)\n\n\ndef check_docker_dependencies(dependencies):\n \"\"\"Checks docker dependencies.\n\n Args:\n dependencies(dict): dictionary of dependencies to check for.\n\n Raises:\n TurbiniaException: If dependency is not met.\n \"\"\"\n #TODO(wyassine): may run into issues down the line when a docker image\n # does not have bash or which installed. (no linux fs layer).\n log.info('Performing docker dependency check.')\n job_names = list(job_manager.JobsManager.GetJobNames())\n images = docker_manager.DockerManager().list_images(return_filter='short_id')\n\n # Iterate through list of jobs\n for job, values in dependencies.items():\n if job not in job_names:\n log.warning(\n 'The job {0:s} was not found or has been disabled. Skipping '\n 'dependency check...'.format(job))\n continue\n docker_image = values.get('docker_image')\n # short id only pulls the first 10 characters of image id.\n if docker_image and len(docker_image) > 10:\n docker_image = docker_image[0:10]\n\n if docker_image in images:\n for program in values['programs']:\n cmd = 'type {0:s}'.format(program)\n stdout, stderr, ret = docker_manager.ContainerManager(\n values['docker_image']).execute_container(cmd, shell=True)\n if ret != 0:\n raise TurbiniaException(\n 'Job dependency {0:s} not found for job {1:s}. Please install '\n 'the dependency for the container or disable the job.'.format(\n program, job))\n job_manager.JobsManager.RegisterDockerImage(job, values['docker_image'])\n elif docker_image:\n raise TurbiniaException(\n 'Docker image {0:s} was not found for the job {1:s}. Please '\n 'update the config with the correct image id'.format(\n values['docker_image'], job))\n\n\ndef check_system_dependencies(dependencies):\n \"\"\"Checks system dependencies.\n\n Args:\n dependencies(dict): dictionary of dependencies to check for.\n\n Raises:\n TurbiniaException: If dependency is not met.\n \"\"\"\n log.info('Performing system dependency check.')\n job_names = list(job_manager.JobsManager.GetJobNames())\n\n # Iterate through list of jobs\n for job, values in dependencies.items():\n if job not in job_names:\n log.warning(\n 'The job {0:s} was not found or has been disabled. 
Skipping '\n 'dependency check...'.format(job))\n continue\n elif not values.get('docker_image'):\n for program in values['programs']:\n cmd = 'type {0:s}'.format(program)\n proc = subprocess.Popen(cmd, shell=True)\n proc.communicate()\n ret = proc.returncode\n if ret != 0:\n raise TurbiniaException(\n 'Job dependency {0:s} not found in $PATH for the job {1:s}. '\n 'Please install the dependency or disable the job.'.format(\n program, job))\n\n\ndef check_directory(directory):\n \"\"\"Checks directory to make sure it exists and is writable.\n\n Args:\n directory (string): Path to directory\n\n Raises:\n TurbiniaException: When directory cannot be created or used.\n \"\"\"\n if os.path.exists(directory) and not os.path.isdir(directory):\n raise TurbiniaException(\n 'File {0:s} exists, but is not a directory'.format(directory))\n\n if not os.path.exists(directory):\n try:\n os.makedirs(directory)\n except OSError:\n raise TurbiniaException(\n 'Can not create Directory {0:s}'.format(directory))\n\n if not os.access(directory, os.W_OK):\n try:\n mode = os.stat(directory)[0]\n os.chmod(directory, mode | stat.S_IWUSR)\n except OSError:\n raise TurbiniaException(\n 'Can not add write permissions to {0:s}'.format(directory))\n\n\nclass TurbiniaStats(object):\n \"\"\"Statistics for Turbinia task execution.\n\n Attributes:\n count(int): The number of tasks\n min(datetime.timedelta): The minimum run time of all tasks\n max(datetime.timedelta): The maximum run time of all tasks\n mean(datetime.timedelta): The mean run time of all tasks\n tasks(list): A list of tasks to calculate stats for\n \"\"\"\n\n def __init__(self, description=None):\n self.description = description\n self.min = None\n self.mean = None\n self.max = None\n self.tasks = []\n\n def __str__(self):\n return self.format_stats()\n\n @property\n def count(self):\n \"\"\"Gets a count of the tasks in this stats object.\n\n Returns:\n Int of task count.\n \"\"\"\n return len(self.tasks)\n\n def add_task(self, task):\n \"\"\"Add a task result dict.\n\n Args:\n task(dict): The task results we want to count stats for.\n \"\"\"\n self.tasks.append(task)\n\n def calculate_stats(self):\n \"\"\"Calculates statistics of the current tasks.\"\"\"\n if not self.tasks:\n return\n\n sorted_tasks = sorted(self.tasks, key=itemgetter('run_time'))\n self.min = sorted_tasks[0]['run_time']\n self.max = sorted_tasks[len(sorted_tasks) - 1]['run_time']\n self.mean = sorted_tasks[len(sorted_tasks) // 2]['run_time']\n\n # Remove the microseconds to keep things cleaner\n self.min = self.min - timedelta(microseconds=self.min.microseconds)\n self.max = self.max - timedelta(microseconds=self.max.microseconds)\n self.mean = self.mean - timedelta(microseconds=self.mean.microseconds)\n\n def format_stats(self):\n \"\"\"Formats statistics data.\n\n Returns:\n String of statistics data\n \"\"\"\n return '{0:s}: Count: {1:d}, Min: {2!s}, Mean: {3!s}, Max: {4!s}'.format(\n self.description, self.count, self.min, self.mean, self.max)\n\n def format_stats_csv(self):\n \"\"\"Formats statistics data into CSV output.\n\n Returns:\n String of statistics data in CSV format\n \"\"\"\n return '{0:s}, {1:d}, {2!s}, {3!s}, {4!s}'.format(\n self.description, self.count, self.min, self.mean, self.max)\n\n\nclass BaseTurbiniaClient(object):\n \"\"\"Client class for Turbinia.\n\n Attributes:\n task_manager (TaskManager): Turbinia task manager\n \"\"\"\n\n def __init__(self, run_local=False):\n config.LoadConfig()\n if run_local:\n self.task_manager = None\n else:\n self.task_manager = 
task_manager.get_task_manager()\n self.task_manager.setup(server=False)\n\n def create_task(self, task_name):\n \"\"\"Creates a Turbinia Task by name.\n\n Args:\n task_name(string): Name of the Task we are going to run.\n\n Returns:\n TurbiniaTask: An instantiated Task object.\n\n Raises:\n TurbiniaException: When no Task object matching task_name is found.\n \"\"\"\n task_obj = TASK_MAP.get(task_name.lower())\n log.debug('Looking up Task {0:s} by name'.format(task_name))\n if not task_obj:\n raise TurbiniaException('No Task named {0:s} found'.format(task_name))\n return task_obj()\n\n def list_jobs(self):\n \"\"\"List the available jobs.\"\"\"\n # TODO(aarontp): Refactor this out so that we don't need to depend on\n # the task manager from the client.\n log.info('Available Jobs:')\n for job in self.task_manager.jobs:\n log.info('\\t{0:s}'.format(job.NAME))\n\n def wait_for_request(\n self, instance, project, region, request_id=None, user=None,\n poll_interval=60):\n \"\"\"Polls and waits for Turbinia Request to complete.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the region to execute in.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n poll_interval (int): Interval of seconds between polling cycles.\n \"\"\"\n last_completed_count = -1\n last_uncompleted_count = -1\n while True:\n task_results = self.get_task_data(\n instance, project, region, request_id=request_id, user=user)\n completed_tasks = []\n uncompleted_tasks = []\n for task in task_results:\n if task.get('successful') is not None:\n completed_tasks.append(task)\n else:\n uncompleted_tasks.append(task)\n\n if completed_tasks and len(completed_tasks) == len(task_results):\n break\n\n completed_names = [t.get('name') for t in completed_tasks]\n completed_names = ', '.join(sorted(completed_names))\n uncompleted_names = [t.get('name') for t in uncompleted_tasks]\n uncompleted_names = ', '.join(sorted(uncompleted_names))\n total_count = len(completed_tasks) + len(uncompleted_tasks)\n msg = (\n 'Tasks completed ({0:d}/{1:d}): [{2:s}], waiting for [{3:s}].'.format(\n len(completed_tasks), total_count, completed_names,\n uncompleted_names))\n if (len(completed_tasks) > last_completed_count or\n len(uncompleted_tasks) > last_uncompleted_count):\n log.info(msg)\n else:\n log.debug(msg)\n\n last_completed_count = len(completed_tasks)\n last_uncompleted_count = len(uncompleted_tasks)\n time.sleep(poll_interval)\n\n log.info('All {0:d} Tasks completed'.format(len(task_results)))\n\n def get_task_data(\n self, instance, project, region, days=0, task_id=None, request_id=None,\n user=None, function_name='gettasks', output_json=False):\n \"\"\"Gets task data from Google Cloud Functions.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the region to execute in.\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n function_name (string): The GCF function we want to call.\n output_json (bool): Whether to return JSON output.\n\n Returns:\n (List|JSON string) of Task dict objects\n \"\"\"\n cloud_function = 
gcp_function.GoogleCloudFunction(project)\n func_args = {'instance': instance, 'kind': 'TurbiniaTask'}\n\n if days:\n start_time = datetime.now() - timedelta(days=days)\n # Format this like '1990-01-01T00:00:00z' so we can cast it directly to a\n # javascript Date() object in the cloud function.\n start_string = start_time.strftime(DATETIME_FORMAT)\n func_args.update({'start_time': start_string})\n elif task_id:\n func_args.update({'task_id': task_id})\n elif request_id:\n func_args.update({'request_id': request_id})\n\n if user:\n func_args.update({'user': user})\n\n response = None\n retry_count = 0\n credential_error_count = 0\n while response is None and retry_count < MAX_RETRIES:\n try:\n response = cloud_function.ExecuteFunction(\n function_name, region, func_args)\n except auth.exceptions.RefreshError as exception:\n if credential_error_count == 0:\n log.info(\n 'GCP Credentials need to be refreshed, please refresh in another '\n 'terminal and this process will resume. Error: {0!s}'.format(\n exception))\n else:\n log.debug(\n 'GCP Credentials need to be refreshed, please refresh in another '\n 'terminal and this process will resume. Attempt {0:d}. Error: '\n '{1!s}'.format(credential_error_count + 1, exception))\n # Note, we are intentially not incrementing the retry_count here because\n # we will retry indefinitely while we wait for the user to reauth.\n credential_error_count += 1\n except httplib2.ServerNotFoundError as exception:\n log.info(\n 'Error connecting to server, will retry [{0:d} of {1:d} retries]: '\n '{2!s}'.format(retry_count, MAX_RETRIES, exception))\n retry_count += 1\n\n if response is None:\n time.sleep(RETRY_SLEEP)\n\n if 'result' not in response:\n log.error('No results found')\n if response.get('error', '{}') != '{}':\n msg = 'Error executing Cloud Function: [{0!s}].'.format(\n response.get('error'))\n log.error(msg)\n log.debug('GCF response: {0!s}'.format(response))\n raise TurbiniaException(\n 'Cloud Function {0:s} returned no results.'.format(function_name))\n\n try:\n results = json.loads(response['result'])\n except (TypeError, ValueError) as e:\n raise TurbiniaException(\n 'Could not deserialize result [{0!s}] from GCF: [{1!s}]'.format(\n response.get('result'), e))\n\n task_data = results[0]\n if output_json:\n try:\n json_data = json.dumps(task_data)\n except (TypeError, ValueError) as e:\n raise TurbiniaException(\n 'Could not re-serialize result [{0!s}] from GCF: [{1!s}]'.format(\n str(task_data), e))\n return json_data\n\n # Convert run_time/last_update back into datetime objects\n for task in task_data:\n if task.get('run_time'):\n task['run_time'] = timedelta(seconds=task['run_time'])\n if task.get('last_update'):\n task['last_update'] = datetime.strptime(\n task['last_update'], DATETIME_FORMAT)\n\n return task_data\n\n def format_task_detail(self, task, show_files=False):\n \"\"\"Formats a single task in detail.\n\n Args:\n task (dict): The task to format data for\n show_files (bool): Whether we want to print out log file paths\n\n Returns:\n list: Formatted task data\n \"\"\"\n report = []\n saved_paths = task.get('saved_paths') or []\n status = task.get('status') or 'No task status'\n\n report.append(fmt.heading2(task.get('name')))\n line = '{0:s} {1:s}'.format(fmt.bold('Status:'), status)\n report.append(fmt.bullet(line))\n report.append(fmt.bullet('Task Id: {0:s}'.format(task.get('id'))))\n report.append(\n fmt.bullet('Executed on worker {0:s}'.format(task.get('worker_name'))))\n if task.get('report_data'):\n report.append('')\n 
report.append(fmt.heading3('Task Reported Data'))\n report.extend(task.get('report_data').splitlines())\n if show_files:\n report.append('')\n report.append(fmt.heading3('Saved Task Files:'))\n for path in saved_paths:\n report.append(fmt.bullet(fmt.code(path)))\n report.append('')\n return report\n\n def format_worker_task(self, task):\n \"\"\"Formats a single task for Worker view.\n\n Args:\n task (dict): The task to format data for\n Returns:\n list: Formatted task data\n \"\"\"\n report = []\n report.append(\n fmt.bullet('{0:s} - {1:s}'.format(task['task_id'], task['task_name'])))\n report.append(\n fmt.bullet(\n 'Last Update: {0:s}'.format(\n task['last_update'].strftime(DATETIME_FORMAT)), level=2))\n report.append(fmt.bullet('Status: {0:s}'.format(task['status']), level=2))\n report.append(\n fmt.bullet('Run Time: {0:s}'.format(str(task['run_time'])), level=2))\n report.append('')\n return report\n\n def format_task(self, task, show_files=False):\n \"\"\"Formats a single task in short form.\n\n Args:\n task (dict): The task to format data for\n show_files (bool): Whether we want to print out log file paths\n\n Returns:\n list: Formatted task data\n \"\"\"\n report = []\n saved_paths = task.get('saved_paths') or []\n status = task.get('status') or 'No task status'\n report.append(fmt.bullet('{0:s}: {1:s}'.format(task.get('name'), status)))\n if show_files:\n for path in saved_paths:\n report.append(fmt.bullet(fmt.code(path), level=2))\n report.append('')\n return report\n\n def get_task_statistics(\n self, instance, project, region, days=0, task_id=None, request_id=None,\n user=None):\n \"\"\"Gathers statistics for Turbinia execution data.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n\n Returns:\n task_stats(dict): Mapping of statistic names to values\n \"\"\"\n task_results = self.get_task_data(\n instance, project, region, days, task_id, request_id, user)\n if not task_results:\n return {}\n\n task_stats = {\n 'all_tasks': TurbiniaStats('All Tasks'),\n 'successful_tasks': TurbiniaStats('Successful Tasks'),\n 'failed_tasks': TurbiniaStats('Failed Tasks'),\n 'requests': TurbiniaStats('Total Request Time'),\n # The following are dicts mapping the user/worker/type names to their\n # respective TurbiniaStats() objects.\n # Total wall-time for all tasks of a given type\n 'tasks_per_type': {},\n # Total wall-time for all tasks per Worker\n 'tasks_per_worker': {},\n # Total wall-time for all tasks per User\n 'tasks_per_user': {},\n }\n\n # map of request ids to [min time, max time]\n requests = {}\n\n for task in task_results:\n request_id = task.get('request_id')\n task_type = task.get('name')\n worker = task.get('worker_name')\n user = task.get('requester')\n if not task.get('run_time'):\n log.debug(\n 'Ignoring task {0:s} in statistics because the run_time is not '\n 'set, and it is required to calculate stats'.format(\n task.get('name')))\n continue\n\n # Stats for all/successful/failed tasks\n task_stats['all_tasks'].add_task(task)\n if task.get('successful') is True:\n task_stats['successful_tasks'].add_task(task)\n elif task.get('successful') is False:\n task_stats['failed_tasks'].add_task(task)\n\n # 
Stats for Tasks per Task type.\n if task_type in task_stats['tasks_per_type']:\n task_type_stats = task_stats['tasks_per_type'].get(task_type)\n else:\n task_type_stats = TurbiniaStats('Task type {0:s}'.format(task_type))\n task_stats['tasks_per_type'][task_type] = task_type_stats\n task_type_stats.add_task(task)\n\n # Stats per worker.\n if worker in task_stats['tasks_per_worker']:\n worker_stats = task_stats['tasks_per_worker'].get(worker)\n else:\n worker_stats = TurbiniaStats('Worker {0:s}'.format(worker))\n task_stats['tasks_per_worker'][worker] = worker_stats\n worker_stats.add_task(task)\n\n # Stats per submitting User.\n if user in task_stats['tasks_per_user']:\n user_stats = task_stats['tasks_per_user'].get(user)\n else:\n user_stats = TurbiniaStats('User {0:s}'.format(user))\n task_stats['tasks_per_user'][user] = user_stats\n user_stats.add_task(task)\n\n # Stats for the total request. This will, for each request, calculate the\n # start time of the earliest task and the stop time of the latest task.\n # This will give the overall run time covering all tasks in the request.\n task_start_time = task['last_update'] - task['run_time']\n task_stop_time = task['last_update']\n if request_id in requests:\n start_time, stop_time = requests[request_id]\n if task_start_time < start_time:\n requests[request_id][0] = task_start_time\n if task_stop_time > stop_time:\n requests[request_id][1] = task_stop_time\n else:\n requests[request_id] = [task_start_time, task_stop_time]\n\n # Add a fake task result for each request with our calculated times to the\n # stats module\n for min_time, max_time in requests.values():\n task = {}\n task['run_time'] = max_time - min_time\n task_stats['requests'].add_task(task)\n\n # Go over all stat objects and calculate them\n for stat_obj in task_stats.values():\n if isinstance(stat_obj, dict):\n for inner_stat_obj in stat_obj.values():\n inner_stat_obj.calculate_stats()\n else:\n stat_obj.calculate_stats()\n\n return task_stats\n\n def format_task_statistics(\n self, instance, project, region, days=0, task_id=None, request_id=None,\n user=None, csv=False):\n \"\"\"Formats statistics for Turbinia execution data.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n csv (bool): Whether we want the output in CSV format.\n\n Returns:\n String of task statistics report\n \"\"\"\n task_stats = self.get_task_statistics(\n instance, project, region, days, task_id, request_id, user)\n if not task_stats:\n return 'No tasks found'\n\n stats_order = [\n 'all_tasks', 'successful_tasks', 'failed_tasks', 'requests',\n 'tasks_per_type', 'tasks_per_worker', 'tasks_per_user'\n ]\n\n if csv:\n report = ['stat_type, count, min, mean, max']\n else:\n report = ['Execution time statistics for Turbinia:', '']\n for stat_name in stats_order:\n stat_obj = task_stats[stat_name]\n if isinstance(stat_obj, dict):\n # Sort by description so that we get consistent report output\n inner_stat_objs = sorted(\n stat_obj.values(), key=attrgetter('description'))\n for inner_stat_obj in inner_stat_objs:\n if csv:\n report.append(inner_stat_obj.format_stats_csv())\n else:\n report.append(inner_stat_obj.format_stats())\n 
else:\n if csv:\n report.append(stat_obj.format_stats_csv())\n else:\n report.append(stat_obj.format_stats())\n\n report.append('')\n return '\\n'.join(report)\n\n def format_worker_status(\n self, instance, project, region, days=0, all_fields=False):\n \"\"\"Formats the recent history for Turbinia Workers.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n days (int): The number of days we want history for.\n all_fields (bool): Include historical Task information for the worker.\n Returns:\n String of Request status\n \"\"\"\n # Set number of days to retrieve data\n num_days = 7\n if days != 0:\n num_days = days\n task_results = self.get_task_data(instance, project, region, days=num_days)\n if not task_results:\n return ''\n\n # Sort task_results by last updated timestamp.\n task_results = sorted(\n task_results, key=itemgetter('last_update'), reverse=True)\n\n # Create dictionary of worker_node: {{task_id, task_update,\n # task_name, task_status}}\n workers_dict = {}\n scheduled_counter = 0\n for result in task_results:\n worker_node = result.get('worker_name')\n status = result.get('status')\n status = status if status else 'No task status'\n if worker_node and worker_node not in workers_dict:\n workers_dict[worker_node] = []\n if worker_node:\n task_dict = {}\n task_dict['task_id'] = result.get('id')\n task_dict['last_update'] = result.get('last_update')\n task_dict['task_name'] = result.get('name')\n task_dict['status'] = status\n # Check status for anything that is running.\n if 'running' in status:\n run_time = (datetime.now() -\n result.get('last_update')).total_seconds()\n run_time = timedelta(seconds=run_time)\n task_dict['run_time'] = run_time\n else:\n run_time = result.get('run_time')\n task_dict['run_time'] = run_time if run_time else 'No run time.'\n workers_dict[worker_node].append(task_dict)\n else:\n # Track scheduled/unassigned Tasks for reporting.\n scheduled_counter += 1\n\n # Generate report header\n report = []\n report.append(\n fmt.heading1(\n 'Turbinia report for Worker activity within {0:d} days'.format(\n num_days)))\n report.append(\n fmt.bullet('{0:d} Worker(s) found.'.format(len(workers_dict.keys()))))\n report.append(\n fmt.bullet(\n '{0:d} Task(s) unassigned or scheduled and pending Worker assignment.'\n .format(scheduled_counter)))\n for worker_node, tasks in workers_dict.items():\n report.append('')\n report.append(fmt.heading2('Worker Node: {0:s}'.format(worker_node)))\n # Append the statuses chronologically\n run_status, queued_status, other_status = [], [], []\n for task in tasks:\n if 'running' in task['status']:\n run_status.extend(self.format_worker_task(task))\n elif 'queued' in task['status']:\n queued_status.extend(self.format_worker_task(task))\n else:\n other_status.extend(self.format_worker_task(task))\n # Add each of the status lists back to report list\n not_found = [fmt.bullet('No Tasks found.')]\n report.append(fmt.heading3('Running Tasks'))\n report.extend(run_status if run_status else not_found)\n report.append('')\n report.append(fmt.heading3('Queued Tasks'))\n report.extend(queued_status if queued_status else not_found)\n # Add Historical Tasks\n if all_fields:\n report.append('')\n report.append(fmt.heading3('Finished Tasks'))\n report.extend(other_status if other_status else not_found)\n return '\\n'.join(report)\n\n def format_request_status(\n self, instance, project, 
region, days=0, all_fields=False):\n \"\"\"Formats the recent history for Turbinia Requests.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n days (int): The number of days we want history for.\n all_fields (bool): Include all fields for the Request, which includes,\n saved file paths.\n Returns:\n String of Request status\n \"\"\"\n # Set number of days to retrieve data\n num_days = 7\n if days != 0:\n num_days = days\n task_results = self.get_task_data(instance, project, region, days=num_days)\n if not task_results:\n return ''\n\n # Sort task_results by last updated timestamp.\n task_results = sorted(\n task_results, key=itemgetter('last_update'), reverse=True)\n\n # Create dictionary of request_id: {saved_paths, last_update, requester,\n # task_id}\n request_dict = {}\n for result in task_results:\n request_id = result.get('request_id')\n saved_paths = result.get('saved_paths')\n if request_id not in request_dict:\n saved_paths = set(saved_paths) if saved_paths else set()\n request_dict[request_id] = {}\n request_dict[request_id]['saved_paths'] = saved_paths\n request_dict[request_id]['last_update'] = result.get('last_update')\n request_dict[request_id]['requester'] = result.get('requester')\n request_dict[request_id]['task_id'] = set([result.get('id')])\n else:\n if saved_paths:\n request_dict[request_id]['saved_paths'].update(saved_paths)\n request_dict[request_id]['task_id'].update([result.get('id')])\n\n # Generate report header\n report = []\n report.append(\n fmt.heading1(\n 'Turbinia report for Requests made within {0:d} days'.format(\n num_days)))\n report.append(\n fmt.bullet(\n '{0:d} requests were made within this timeframe.'.format(\n len(request_dict.keys()))))\n # Print report data for Requests\n for request_id, values in request_dict.items():\n report.append('')\n report.append(fmt.heading2('Request ID: {0:s}'.format(request_id)))\n report.append(\n fmt.bullet(\n 'Last Update: {0:s}'.format(\n values['last_update'].strftime(DATETIME_FORMAT))))\n report.append(fmt.bullet('Requester: {0:s}'.format(values['requester'])))\n report.append(\n fmt.bullet('Task Count: {0:d}'.format(len(values['task_id']))))\n if all_fields:\n report.append(fmt.bullet('Associated Evidence:'))\n # Append all saved paths in request\n for path in sorted(values['saved_paths']):\n report.append(fmt.bullet(fmt.code(path), level=2))\n report.append('')\n return '\\n'.join(report)\n\n def format_task_status(\n self, instance, project, region, days=0, task_id=None, request_id=None,\n user=None, all_fields=False, full_report=False,\n priority_filter=Priority.HIGH, output_json=False):\n \"\"\"Formats the recent history for Turbinia Tasks.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n user (string): The user of the request we want tasks for.\n all_fields (bool): Include all fields for the task, including task,\n request ids and saved file paths.\n full_report (bool): Generate a full markdown report instead of just a\n summary.\n priority_filter (int): Output only a summary for Tasks with a value\n greater than the 
priority_filter.\n output_json (bool): Whether to return JSON output.\n\n Returns:\n String of task status in JSON or human readable format.\n \"\"\"\n if user and days == 0:\n days = 1000\n task_results = self.get_task_data(\n instance, project, region, days, task_id, request_id, user,\n output_json=output_json)\n if not task_results:\n return ''\n\n if output_json:\n return task_results\n\n # Sort all tasks by the report_priority so that tasks with a higher\n # priority are listed first in the report.\n for result in task_results:\n # 0 is a valid value, so checking against specific values\n if result.get('report_priority') in (None, ''):\n result['report_priority'] = Priority.LOW\n task_results = sorted(task_results, key=itemgetter('report_priority'))\n num_results = len(task_results)\n if not num_results:\n msg = 'No Turbinia Tasks found.'\n log.info(msg)\n return '\\n{0:s}'.format(msg)\n\n # Build up data\n report = []\n requester = task_results[0].get('requester')\n request_id = task_results[0].get('request_id')\n success_types = ['Successful', 'Failed', 'Scheduled or Running']\n success_values = [True, False, None]\n # Reverse mapping values to types\n success_map = dict(zip(success_values, success_types))\n task_map = defaultdict(list)\n success_types.insert(0, 'High Priority')\n for task in task_results:\n if task.get('report_priority') <= priority_filter:\n task_map['High Priority'].append(task)\n else:\n task_map[success_map[task.get('successful')]].append(task)\n\n # Generate report header\n report.append('\\n')\n report.append(fmt.heading1('Turbinia report {0:s}'.format(request_id)))\n report.append(\n fmt.bullet(\n 'Processed {0:d} Tasks for user {1:s}'.format(\n num_results, requester)))\n\n # Print report data for tasks\n for success_type in success_types:\n report.append('')\n report.append(fmt.heading1('{0:s} Tasks'.format(success_type)))\n if not task_map[success_type]:\n report.append(fmt.bullet('None'))\n for task in task_map[success_type]:\n if full_report and success_type == success_types[0]:\n report.extend(self.format_task_detail(task, show_files=all_fields))\n else:\n report.extend(self.format_task(task, show_files=all_fields))\n\n return '\\n'.join(report)\n\n def run_local_task(self, task_name, request):\n \"\"\"Runs a Turbinia Task locally.\n\n Args:\n task_name(string): Name of the Task we are going to run.\n request (TurbiniaRequest): Object containing request and evidence info.\n\n Returns:\n TurbiniaTaskResult: The result returned by the Task Execution.\n \"\"\"\n task = self.create_task(task_name)\n task.request_id = request.request_id\n task.base_output_dir = config.OUTPUT_DIR\n task.run_local = True\n if not request.evidence:\n raise TurbiniaException('TurbiniaRequest does not contain evidence.')\n log.info('Running Task {0:s} locally'.format(task_name))\n result = task.run_wrapper(request.evidence[0].serialize())\n return result\n\n def send_request(self, request):\n \"\"\"Sends a TurbiniaRequest message.\n\n Args:\n request: A TurbiniaRequest object.\n \"\"\"\n self.task_manager.server_pubsub.send_request(request)\n\n def close_tasks(\n self, instance, project, region, request_id=None, task_id=None, user=None,\n requester=None):\n \"\"\"Close Turbinia Tasks based on Request ID.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n project (string): The name of the project.\n region (string): The name of the zone to execute in.\n request_id (string): The Id of the request we want tasks 
for.\n task_id (string): The Id of the request we want task for.\n user (string): The user of the request we want tasks for.\n requester (string): The user making the request to close tasks.\n\n Returns: String of closed Task IDs.\n \"\"\"\n cloud_function = gcp_function.GoogleCloudFunction(project)\n func_args = {\n 'instance': instance,\n 'kind': 'TurbiniaTask',\n 'request_id': request_id,\n 'task_id': task_id,\n 'user': user,\n 'requester': requester\n }\n response = cloud_function.ExecuteFunction('closetasks', region, func_args)\n return 'Closed Task IDs: %s' % response.get('result')\n\n\nclass TurbiniaCeleryClient(BaseTurbiniaClient):\n \"\"\"Client class for Turbinia (Celery).\n\n Overriding some things specific to Celery operation.\n\n Attributes:\n redis (RedisStateManager): Redis datastore object\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(TurbiniaCeleryClient, self).__init__(*args, **kwargs)\n self.redis = RedisStateManager()\n\n def send_request(self, request):\n \"\"\"Sends a TurbiniaRequest message.\n\n Args:\n request: A TurbiniaRequest object.\n \"\"\"\n self.task_manager.kombu.send_request(request)\n\n # pylint: disable=arguments-differ\n def get_task_data(\n self, instance, _, __, days=0, task_id=None, request_id=None,\n function_name=None, output_json=False):\n \"\"\"Gets task data from Redis.\n\n We keep the same function signature, but ignore arguments passed for GCP.\n\n Args:\n instance (string): The Turbinia instance name (by default the same as the\n INSTANCE_ID in the config).\n days (int): The number of days we want history for.\n task_id (string): The Id of the task.\n request_id (string): The Id of the request we want tasks for.\n\n Returns:\n List of Task dict objects.\n \"\"\"\n return self.redis.get_task_data(instance, days, task_id, request_id)\n\n\nclass TurbiniaServer(object):\n \"\"\"Turbinia Server class.\n\n Attributes:\n task_manager (TaskManager): An object to manage turbinia tasks.\n \"\"\"\n\n def __init__(self, jobs_denylist=None, jobs_allowlist=None):\n \"\"\"Initializes Turbinia Server.\n\n Args:\n jobs_denylist (Optional[list[str]]): Jobs we will exclude from running\n jobs_allowlist (Optional[list[str]]): The only Jobs we will include to run\n \"\"\"\n config.LoadConfig()\n self.task_manager = task_manager.get_task_manager()\n self.task_manager.setup(jobs_denylist, jobs_allowlist)\n\n def start(self):\n \"\"\"Start Turbinia Server.\"\"\"\n log.info('Starting Prometheus endpoint.')\n start_http_server(port=config.PROMETHEUS_PORT, addr=config.PROMETHEUS_ADDR)\n log.info('Running Turbinia Server.')\n self.task_manager.run()\n\n def add_evidence(self, evidence_):\n \"\"\"Add evidence to be processed.\"\"\"\n self.task_manager.add_evidence(evidence_)\n\n\nclass TurbiniaCeleryWorker(BaseTurbiniaClient):\n \"\"\"Turbinia Celery Worker class.\n\n Attributes:\n worker (celery.app): Celery worker app\n \"\"\"\n\n def __init__(self, jobs_denylist=None, jobs_allowlist=None):\n \"\"\"Initialization for celery worker.\n\n Args:\n jobs_denylist (Optional[list[str]]): Jobs we will exclude from running\n jobs_allowlist (Optional[list[str]]): The only Jobs we will include to run\n \"\"\"\n super(TurbiniaCeleryWorker, self).__init__()\n # Deregister jobs from denylist/allowlist.\n job_manager.JobsManager.DeregisterJobs(jobs_denylist, jobs_allowlist)\n disabled_jobs = list(config.DISABLED_JOBS) if config.DISABLED_JOBS else []\n disabled_jobs = [j.lower() for j in disabled_jobs]\n # Only actually disable jobs that have not been allowlisted.\n if 
jobs_allowlist:\n disabled_jobs = list(set(disabled_jobs) - set(jobs_allowlist))\n if disabled_jobs:\n log.info(\n 'Disabling non-allowlisted jobs configured to be disabled in the '\n 'config file: {0:s}'.format(', '.join(disabled_jobs)))\n job_manager.JobsManager.DeregisterJobs(jobs_denylist=disabled_jobs)\n\n # Check for valid dependencies/directories.\n dependencies = config.ParseDependencies()\n if config.DOCKER_ENABLED:\n check_docker_dependencies(dependencies)\n check_system_dependencies(dependencies)\n check_directory(config.MOUNT_DIR_PREFIX)\n check_directory(config.OUTPUT_DIR)\n check_directory(config.TMP_DIR)\n\n jobs = job_manager.JobsManager.GetJobNames()\n log.info(\n 'Dependency check complete. The following jobs will be enabled '\n 'for this worker: {0:s}'.format(','.join(jobs)))\n self.worker = self.task_manager.celery.app\n\n def start(self):\n \"\"\"Start Turbinia Celery Worker.\"\"\"\n log.info('Running Turbinia Celery Worker.')\n self.worker.task(task_manager.task_runner, name='task_runner')\n argv = ['celery', 'worker', '--loglevel=info', '--pool=solo']\n self.worker.start(argv)\n\n\nclass TurbiniaPsqWorker(object):\n \"\"\"Turbinia PSQ Worker class.\n\n Attributes:\n worker (psq.Worker): PSQ Worker object\n psq (psq.Queue): A Task queue object\n\n Raises:\n TurbiniaException: When errors occur\n \"\"\"\n\n def __init__(self, jobs_denylist=None, jobs_allowlist=None):\n \"\"\"Initialization for PSQ Worker.\n\n Args:\n jobs_denylist (Optional[list[str]]): Jobs we will exclude from running\n jobs_allowlist (Optional[list[str]]): The only Jobs we will include to run\n \"\"\"\n config.LoadConfig()\n psq_publisher = pubsub.PublisherClient()\n psq_subscriber = pubsub.SubscriberClient()\n datastore_client = datastore.Client(project=config.TURBINIA_PROJECT)\n try:\n self.psq = psq.Queue(\n psq_publisher, psq_subscriber, config.TURBINIA_PROJECT,\n name=config.PSQ_TOPIC, storage=psq.DatastoreStorage(datastore_client))\n except exceptions.GoogleCloudError as e:\n msg = 'Error creating PSQ Queue: {0:s}'.format(str(e))\n log.error(msg)\n raise TurbiniaException(msg)\n\n # Deregister jobs from denylist/allowlist.\n job_manager.JobsManager.DeregisterJobs(jobs_denylist, jobs_allowlist)\n disabled_jobs = list(config.DISABLED_JOBS) if config.DISABLED_JOBS else []\n disabled_jobs = [j.lower() for j in disabled_jobs]\n # Only actually disable jobs that have not been allowlisted.\n if jobs_allowlist:\n disabled_jobs = list(set(disabled_jobs) - set(jobs_allowlist))\n if disabled_jobs:\n log.info(\n 'Disabling non-allowlisted jobs configured to be disabled in the '\n 'config file: {0:s}'.format(', '.join(disabled_jobs)))\n job_manager.JobsManager.DeregisterJobs(jobs_denylist=disabled_jobs)\n\n # Check for valid dependencies/directories.\n dependencies = config.ParseDependencies()\n if config.DOCKER_ENABLED:\n check_docker_dependencies(dependencies)\n check_system_dependencies(dependencies)\n check_directory(config.MOUNT_DIR_PREFIX)\n check_directory(config.OUTPUT_DIR)\n check_directory(config.TMP_DIR)\n\n jobs = job_manager.JobsManager.GetJobNames()\n log.info(\n 'Dependency check complete. 
The following jobs are enabled '\n 'for this worker: {0:s}'.format(','.join(jobs)))\n log.info('Starting PSQ listener on queue {0:s}'.format(self.psq.name))\n self.worker = psq.Worker(queue=self.psq)\n\n def start(self):\n \"\"\"Start Turbinia PSQ Worker.\"\"\"\n log.info('Starting Prometheus endpoint.')\n start_http_server(port=config.PROMETHEUS_PORT, addr=config.PROMETHEUS_ADDR)\n log.info('Running Turbinia PSQ Worker.')\n self.worker.listen()\n",
"path": "turbinia/client.py"
}
] | diff --git a/turbinia/client.py b/turbinia/client.py
index 165da5d1d..4e2564331 100644
--- a/turbinia/client.py
+++ b/turbinia/client.py
@@ -1017,7 +1017,7 @@ def run_local_task(self, task_name, request):
if not request.evidence:
raise TurbiniaException('TurbiniaRequest does not contain evidence.')
log.info('Running Task {0:s} locally'.format(task_name))
- result = task.run_wrapper(request.evidence[0])
+ result = task.run_wrapper(request.evidence[0].serialize())
return result
def send_request(self, request):
| Crash when running locally
```
$ turbiniactl -t SSHDAnalysisTask -R rawdisk -l dfchecklist.img
[INFO] Turbinia version: 20190819
[INFO] Creating request 5d50f281e7fc4a24bd88993ad8bb34a9 with evidence dfchecklist.img
[INFO] Run command "turbiniactl status -r 5d50f281e7fc4a24bd88993ad8bb34a9" to see the status of this request and associated tasks
[INFO] Running Task SSHDAnalysisTask locally
Traceback (most recent call last):
File "/usr/local/google/home/romaing/venvs/turbinia/bin/turbiniactl", line 11, in <module>
load_entry_point('turbinia==20190819', 'console_scripts', 'turbiniactl')()
File "/usr/local/google/home/romaing/venvs/turbinia/lib/python3.8/site-packages/turbinia-20190819-py3.8.egg/turbinia/turbiniactl.py", line 813, in main
result = client.run_local_task(args.task, request)
File "/usr/local/google/home/romaing/venvs/turbinia/lib/python3.8/site-packages/turbinia-20190819-py3.8.egg/turbinia/client.py", line 1020, in run_local_task
result = task.run_wrapper(request.evidence[0])
File "/usr/local/google/home/romaing/venvs/turbinia/lib/python3.8/site-packages/turbinia-20190819-py3.8.egg/turbinia/workers/__init__.py", line 705, in run_wrapper
evidence = evidence_decode(evidence)
File "/usr/local/google/home/romaing/venvs/turbinia/lib/python3.8/site-packages/turbinia-20190819-py3.8.egg/turbinia/evidence.py", line 56, in evidence_decode
raise TurbiniaException(
turbinia.TurbiniaException: Evidence_dict is not a dictionary, type is <class 'turbinia.evidence.RawDisk'>
```
|
horovod__horovod-3002 | [
{
"content": "# Copyright 2020 Uber Technologies, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport functools\nimport queue\n\nfrom horovod.common.exceptions import HorovodInternalError, HostsUpdatedInterrupt\nfrom horovod.runner.elastic.worker import HostUpdateResult, WorkerNotificationManager\n\n\nnotification_manager = WorkerNotificationManager()\n\n\nclass State(object):\n \"\"\"State representation used for tracking in memory state across workers.\n\n Args:\n bcast_object: Function used to broadcast a variable from rank 0 to the other workers.\n get_rank: Function that returns the current rank of this worker.\n \"\"\"\n def __init__(self, bcast_object, get_rank):\n self._bcast_object = bcast_object\n self._rank = get_rank\n self._host_messages = queue.Queue()\n self._last_updated_timestamp = 0\n self._reset_callbacks = []\n\n def register_reset_callbacks(self, callbacks):\n \"\"\"Register callbacks that will be invoked following a reset event (worker added or removed).\n\n For example, a common use of a reset callback would be to update the learning rate scale with the\n new number of workers.\n\n Args:\n callbacks: list of functions to execute.\n \"\"\"\n self._reset_callbacks.extend(callbacks)\n\n def on_reset(self):\n self._host_messages = queue.Queue()\n self.reset()\n for callback in self._reset_callbacks:\n callback()\n\n def on_hosts_updated(self, timestamp, update_res):\n self._host_messages.put((timestamp, update_res))\n\n def commit(self):\n \"\"\"Commits all modifications to state tracked by this object to host memory.\n\n This call will also check for any changes to known hosts, and raise a `HostsUpdatedInterrupt`\n if any were detected.\n\n Because commits are a heavy operation involving data copy (potentially from GPU to host), it is\n recommended to consider committing less frequently than once per batch. This allows users to tradeoff\n between per-batch execution time and lost training steps in the event of a worker failure.\n \"\"\"\n self.save()\n self.check_host_updates()\n\n def check_host_updates(self):\n \"\"\"Checks that a notification has been sent indicating that hosts can be added or will be removed.\n\n Raises a `HostsUpdatedInterrupt` if such a notification has been received.\n \"\"\"\n # Iterate through the update messages sent from the server. 
If the update timestamp\n # is greater than the last update timestamp, then trigger a HostsUpdatedException.\n last_updated_timestamp = prev_timestamp = self._last_updated_timestamp\n all_update = HostUpdateResult.no_update\n while not self._host_messages.empty():\n timestamp, update = self._host_messages.get()\n if timestamp > last_updated_timestamp:\n last_updated_timestamp = timestamp\n all_update |= update\n\n # In order to ensure all workers raise the exception at the same time, we need to sync\n # the updated state across all the workers.\n # TODO(travis): this should be a max allreduce to account for changes in rank 0\n prev_timestamp, self._last_updated_timestamp, all_update = \\\n self._bcast_object((prev_timestamp, last_updated_timestamp, all_update))\n\n # At this point, updated state is globally consistent across all ranks.\n if self._last_updated_timestamp > prev_timestamp:\n raise HostsUpdatedInterrupt(all_update == HostUpdateResult.removed)\n\n\n def save(self):\n \"\"\"Saves state to host memory.\"\"\"\n raise NotImplementedError()\n\n def restore(self):\n \"\"\"Restores the last committed state, undoing any uncommitted modifications.\"\"\"\n raise NotImplementedError()\n\n def sync(self):\n \"\"\"Synchronize state across workers.\"\"\"\n raise NotImplementedError()\n\n def reset(self):\n \"\"\"Reset objects and variables following a reset event (before synchronization).\"\"\"\n pass\n\n\nclass ObjectState(State):\n \"\"\"State for simple Python objects.\n\n Every object is specified as a keyword argument, and will be assigned as an attribute.\n\n Args:\n bcast_object: Horovod broadcast object function used to sync state dictionary.\n get_rank: Horovod rank function used to identify is this process is the coordinator.\n kwargs: Properties to sync, will be exposed as attributes of the object.\n \"\"\"\n def __init__(self, bcast_object, get_rank, **kwargs):\n self._bcast_object = bcast_object\n self._saved_state = kwargs\n self._set_attrs()\n super(ObjectState, self).__init__(bcast_object=bcast_object, get_rank=get_rank)\n\n def save(self):\n new_state = {}\n for attr in self._saved_state.keys():\n new_state[attr] = getattr(self, attr)\n self._saved_state = new_state\n\n def restore(self):\n self._set_attrs()\n\n def sync(self):\n if self._saved_state:\n self._saved_state = self._bcast_object(self._saved_state)\n self._set_attrs()\n\n def _set_attrs(self):\n for attr, value in self._saved_state.items():\n setattr(self, attr, value)\n\n\ndef run_fn(func, reset):\n @functools.wraps(func)\n def wrapper(state, *args, **kwargs):\n notification_manager.init()\n notification_manager.register_listener(state)\n skip_sync = False\n\n try:\n while True:\n if not skip_sync:\n state.sync()\n\n try:\n return func(state, *args, **kwargs)\n except HorovodInternalError:\n state.restore()\n skip_sync = False\n except HostsUpdatedInterrupt as e:\n skip_sync = e.skip_sync\n\n reset()\n state.on_reset()\n finally:\n notification_manager.remove_listener(state)\n return wrapper\n",
"path": "horovod/common/elastic.py"
}
] | [
{
"content": "# Copyright 2020 Uber Technologies, Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\nimport functools\nimport queue\n\nfrom horovod.common.exceptions import HorovodInternalError, HostsUpdatedInterrupt\nfrom horovod.runner.elastic.worker import HostUpdateResult, WorkerNotificationManager\n\n\nnotification_manager = WorkerNotificationManager()\n\n\nclass State(object):\n \"\"\"State representation used for tracking in memory state across workers.\n\n Args:\n bcast_object: Function used to broadcast a variable from rank 0 to the other workers.\n get_rank: Function that returns the current rank of this worker.\n \"\"\"\n def __init__(self, bcast_object, get_rank):\n self._bcast_object = bcast_object\n self._rank = get_rank\n self._host_messages = queue.Queue()\n self._last_updated_timestamp = 0\n self._reset_callbacks = []\n\n def register_reset_callbacks(self, callbacks):\n \"\"\"Register callbacks that will be invoked following a reset event (worker added or removed).\n\n For example, a common use of a reset callback would be to update the learning rate scale with the\n new number of workers.\n\n Args:\n callbacks: list of functions to execute.\n \"\"\"\n self._reset_callbacks.extend(callbacks)\n\n def on_reset(self):\n self._host_messages = queue.Queue()\n self.reset()\n for callback in self._reset_callbacks:\n callback()\n\n def on_hosts_updated(self, timestamp, update_res):\n self._host_messages.put((timestamp, update_res))\n\n def commit(self):\n \"\"\"Commits all modifications to state tracked by this object to host memory.\n\n This call will also check for any changes to known hosts, and raise a `HostsUpdatedInterrupt`\n if any were detected.\n\n Because commits are a heavy operation involving data copy (potentially from GPU to host), it is\n recommended to consider committing less frequently than once per batch. This allows users to tradeoff\n between per-batch execution time and lost training steps in the event of a worker failure.\n \"\"\"\n self.save()\n self.check_host_updates()\n\n def check_host_updates(self):\n \"\"\"Checks that a notification has been sent indicating that hosts can be added or will be removed.\n\n Raises a `HostsUpdatedInterrupt` if such a notification has been received.\n \"\"\"\n # Iterate through the update messages sent from the server. 
If the update timestamp\n # is greater than the last update timestamp, then trigger a HostsUpdatedException.\n last_updated_timestamp = prev_timestamp = self._last_updated_timestamp\n all_update = HostUpdateResult.no_update\n while not self._host_messages.empty():\n timestamp, update = self._host_messages.get()\n if timestamp > last_updated_timestamp:\n last_updated_timestamp = timestamp\n all_update |= update\n\n # In order to ensure all workers raise the exception at the same time, we need to sync\n # the updated state across all the workers.\n # TODO(travis): this should be a max allreduce to account for changes in rank 0\n prev_timestamp, self._last_updated_timestamp, all_update = \\\n self._bcast_object((prev_timestamp, last_updated_timestamp, all_update))\n\n # At this point, updated state is globally consistent across all ranks.\n if self._last_updated_timestamp > prev_timestamp:\n raise HostsUpdatedInterrupt(all_update == HostUpdateResult.removed)\n\n\n def save(self):\n \"\"\"Saves state to host memory.\"\"\"\n raise NotImplementedError()\n\n def restore(self):\n \"\"\"Restores the last committed state, undoing any uncommitted modifications.\"\"\"\n raise NotImplementedError()\n\n def sync(self):\n \"\"\"Synchronize state across workers.\"\"\"\n raise NotImplementedError()\n\n def reset(self):\n \"\"\"Reset objects and variables following a reset event (before synchronization).\"\"\"\n pass\n\n\nclass ObjectState(State):\n \"\"\"State for simple Python objects.\n\n Every object is specified as a keyword argument, and will be assigned as an attribute.\n\n Args:\n bcast_object: Horovod broadcast object function used to sync state dictionary.\n get_rank: Horovod rank function used to identify is this process is the coordinator.\n kwargs: Properties to sync, will be exposed as attributes of the object.\n \"\"\"\n def __init__(self, bcast_object, get_rank, **kwargs):\n self._bcast_object = bcast_object\n self._saved_state = kwargs\n self._set_attrs()\n super(ObjectState, self).__init__(bcast_object=bcast_object, get_rank=get_rank)\n\n def save(self):\n new_state = {}\n for attr in self._saved_state.keys():\n new_state[attr] = getattr(self, attr)\n self._saved_state = new_state\n\n def restore(self):\n self._set_attrs()\n\n def sync(self):\n if self._saved_state:\n self._saved_state = self._bcast_object(self._saved_state)\n self._set_attrs()\n\n def _set_attrs(self):\n for attr, value in self._saved_state.items():\n setattr(self, attr, value)\n\n\ndef run_fn(func, reset):\n @functools.wraps(func)\n def wrapper(state, *args, **kwargs):\n notification_manager.init()\n notification_manager.register_listener(state)\n skip_sync = False\n\n try:\n while True:\n try:\n if not skip_sync:\n state.sync()\n\n return func(state, *args, **kwargs)\n except HorovodInternalError:\n state.restore()\n skip_sync = False\n except HostsUpdatedInterrupt as e:\n skip_sync = e.skip_sync\n\n reset()\n state.on_reset()\n finally:\n notification_manager.remove_listener(state)\n return wrapper\n",
"path": "horovod/common/elastic.py"
}
] | diff --git a/horovod/common/elastic.py b/horovod/common/elastic.py
index 84512a299f..f7096eb7ee 100644
--- a/horovod/common/elastic.py
+++ b/horovod/common/elastic.py
@@ -157,10 +157,10 @@ def wrapper(state, *args, **kwargs):
try:
while True:
- if not skip_sync:
- state.sync()
-
try:
+ if not skip_sync:
+ state.sync()
+
return func(state, *args, **kwargs)
except HorovodInternalError:
state.restore()
| 【Elastic Horovod】Should we catch exceptions for state.sync()?
**Environment:**
1. Framework: (TensorFlow, Keras, PyTorch, MXNet): Pytorch
2. Framework version: 1.6.0
3. Horovod version: 0.21.3
4. MPI version: 4.0.3
5. CUDA version: 10.2
6. NCCL version: 2.7.6
7. Python version: 3.6
**Checklist:**
1. Did you search issues to find if somebody asked this question before? Yes.
2. If your question is about hang, did you read [this doc](https://github.com/horovod/horovod/blob/master/docs/running.rst)?
3. If your question is about docker, did you read [this doc](https://github.com/horovod/horovod/blob/master/docs/docker.rst)?
4. Did you check if your question is answered in the [troubleshooting guide](https://github.com/horovod/horovod/blob/master/docs/troubleshooting.rst)? Yes
**Bug report:**
When a new worker is added via the host discovery script, the existing workers sync their state to the new one. But if any worker fails during state synchronization, the whole elastic Horovod job fails and the elasticity mechanism never gets a chance to recover:
```
[0]<stderr>:[2021-06-21 21:35:05.743047: E /tmp/pip-req-build-4rhufbvy/horovod/common/operations.cc:640] Horovod background loop uncaught exception: [/pytorch/third_party/gloo/gloo/transport/tcp/pair.cc:575] Connection closed by peer [11.198.63.123]:50349
[0]<stdout>:[2021-06-21 21:35:05.773132: D /tmp/pip-req-build-4rhufbvy/horovod/common/operations.cc:652] [0]: Shutting down background thread
[0]<stderr>:Traceback (most recent call last):
[0]<stderr>: File "/usr/local/lib64/python3.6/site-packages/horovod/torch/mpi_ops.py", line 960, in synchronize
[0]<stderr>: mpi_lib.horovod_torch_wait_and_clear(handle)[0]<stderr>:RuntimeError: Horovod has been shut down. This was caused by an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finished execution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.
[0]<stderr>:
[0]<stderr>:During handling of the above exception, another exception occurred:[0]<stderr>:
[0]<stderr>:Traceback (most recent call last):[0]<stderr>: File "pytorch_synthetic_benchmark_elastic.py", line 140, in <module>
[0]<stderr>: run_benchmark(state)[0]<stderr>: File "/usr/local/lib64/python3.6/site-packages/horovod/common/elastic.py", line 162, in wrapper
[0]<stderr>: state.sync()
[0]<stderr>: File "/usr/local/lib64/python3.6/site-packages/horovod/torch/elastic/state.py", line 62, in sync
[0]<stderr>: handler.sync()[0]<stderr>: File "/usr/local/lib64/python3.6/site-packages/horovod/torch/elastic/state.py", line 101, in sync
[0]<stderr>: broadcast_parameters(self.value.state_dict(), root_rank=0)[0]<stderr>: File "/usr/local/lib64/python3.6/site-packages/horovod/torch/functions.py", line 58, in broadcast_parameters
[0]<stderr>: synchronize(handle)[0]<stderr>: File "/usr/local/lib64/python3.6/site-packages/horovod/torch/mpi_ops.py", line 964, in synchronize
[0]<stderr>: raise HorovodInternalError(e)
[0]<stderr>:horovod.common.exceptions.HorovodInternalError: Horovod has been shut down. This was caused by an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finished execution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.
```
I think this is caused by this [code segment](https://github.com/horovod/horovod/blob/139416965ab9aa5850baf96ec54ce35c58b05119/horovod/common/elastic.py#L161):
It works well for me when I fix the code as follows:
```
def run_fn(func, reset):
....
try:
while True:
try:
# Here we also catch exceptions for state.sync().
if not skip_sync:
state.sync()
return func(state, *args, **kwargs)
except HorovodInternalError:
state.restore()
skip_sync = False
except HostsUpdatedInterrupt as e:
skip_sync = e.skip_sync
reset()
state.on_reset()
finally:
notification_manager.remove_listener(state)
return wrapper
```
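To make the control flow concrete, here is a minimal, framework-free sketch of the retry loop (the `sync`/`train`/`restore`/`reset` callables and the exception classes are stand-ins, not the Horovod API). It shows why moving `state.sync()` inside the `try` block lets a failure during synchronization trigger a restore-and-reset instead of crashing the job:
```python
# Minimal sketch (stand-in names, not the Horovod API): a failure during
# sync() is handled the same way as a failure during training.
class InternalError(Exception):
    """Stand-in for HorovodInternalError."""

class HostsUpdated(Exception):
    """Stand-in for HostsUpdatedInterrupt."""
    def __init__(self, skip_sync=False):
        self.skip_sync = skip_sync

def run_elastic(train, sync, restore, reset):
    skip_sync = False
    while True:
        try:
            if not skip_sync:
                sync()          # may raise InternalError if a peer dies here
            return train()      # may raise either exception as well
        except InternalError:
            restore()           # roll back to the last committed state
            skip_sync = False
        except HostsUpdated as e:
            skip_sync = e.skip_sync
        reset()                 # rebuild communicators before retrying
```
With the original placement of `sync()` outside the `try`, a peer dying during the broadcast never reaches the `InternalError` branch, which is exactly the failure shown in the log above.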
**Steps to reproduce.**
1. In order to reproduce the problem easily, we add a `time.sleep(30)` call in `horovod/examples/elastic/pytorch/pytorch_synthetic_benchmark_elastic.py` as follows:
```
...
state.register_reset_callbacks([on_state_reset])
# Sleep 30s here so that old workers stay blocked in state.sync() when a new
# worker is added via the host discovery script.
time.sleep(30)
run_benchmark(state)
...
```
2. Run elastic horovod:
```
horovodrun -np 1 --host-discovery-script ./discovery_hosts.sh --network-interface eth1 --min-np 1 --log-level DEBUG --verbose python3 pytorch_synthetic_benchmark_elastic.py --num-iters=1000
```
3. After a few iterations have passed, we add a new worker to the host discovery script to raise `HostsUpdatedInterrupt`. The old workers then call `state.sync()` and block there for 30s, because the new worker sleeps 30s before entering `hvd.elastic.run`.
4. While the old workers are blocked in `state.sync()`, we kill one of them to raise `HorovodInternalError`. At this point the elastic Horovod job fails. The stderr output is as follows:
```
[0]<stderr>:[2021-06-21 21:35:05.743047: E /tmp/pip-req-build-4rhufbvy/horovod/common/operations.cc:640] Horovod background loop uncaught exception: [/pytorch/third_party/gloo/gloo/transport/tcp/pair.cc:575] Connection closed by peer [11.198.63.123]:50349
[0]<stdout>:[2021-06-21 21:35:05.773132: D /tmp/pip-req-build-4rhufbvy/horovod/common/operations.cc:652] [0]: Shutting down background thread
[0]<stderr>:Traceback (most recent call last):
[0]<stderr>: File "/usr/local/lib64/python3.6/site-packages/horovod/torch/mpi_ops.py", line 960, in synchronize
[0]<stderr>: mpi_lib.horovod_torch_wait_and_clear(handle)[0]<stderr>:RuntimeError: Horovod has been shut down. This was caused by an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finished execution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.
[0]<stderr>:
[0]<stderr>:During handling of the above exception, another exception occurred:[0]<stderr>:
[0]<stderr>:Traceback (most recent call last):[0]<stderr>: File "pytorch_synthetic_benchmark_elastic.py", line 140, in <module>
[0]<stderr>: run_benchmark(state)[0]<stderr>: File "/usr/local/lib64/python3.6/site-packages/horovod/common/elastic.py", line 162, in wrapper
[0]<stderr>: state.sync()
[0]<stderr>: File "/usr/local/lib64/python3.6/site-packages/horovod/torch/elastic/state.py", line 62, in sync
[0]<stderr>: handler.sync()[0]<stderr>: File "/usr/local/lib64/python3.6/site-packages/horovod/torch/elastic/state.py", line 101, in sync
[0]<stderr>: broadcast_parameters(self.value.state_dict(), root_rank=0)[0]<stderr>: File "/usr/local/lib64/python3.6/site-packages/horovod/torch/functions.py", line 58, in broadcast_parameters
[0]<stderr>: synchronize(handle)[0]<stderr>: File "/usr/local/lib64/python3.6/site-packages/horovod/torch/mpi_ops.py", line 964, in synchronize
[0]<stderr>: raise HorovodInternalError(e)
[0]<stderr>:horovod.common.exceptions.HorovodInternalError: Horovod has been shut down. This was caused by an exception on one of the ranks or an attempt to allreduce, allgather or broadcast a tensor after one of the ranks finished execution. If the shutdown was caused by an exception, you should see the exception in the log before the first shutdown message.
```
```
|
scoutapp__scout_apm_python-672 | [
{
"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nfrom collections import namedtuple\n\nimport wrapt\n\nfrom scout_apm.compat import get_pos_args, unwrap_decorators\nfrom scout_apm.core.tracked_request import TrackedRequest\n\ntry:\n from elasticsearch import Elasticsearch, Transport\nexcept ImportError: # pragma: no cover\n Elasticsearch = None\n Transport = None\n\nlogger = logging.getLogger(__name__)\n\n\ndef ensure_installed():\n logger.debug(\"Instrumenting elasticsearch.\")\n\n if Elasticsearch is None:\n logger.debug(\n \"Couldn't import elasticsearch.Elasticsearch - probably not installed.\"\n )\n else:\n ensure_client_instrumented()\n ensure_transport_instrumented()\n\n\nClientMethod = namedtuple(\"ClientMethod\", [\"name\", \"takes_index_argument\"])\n\nCLIENT_METHODS = [\n ClientMethod(\"bulk\", True),\n ClientMethod(\"clear_scroll\", False),\n ClientMethod(\"close\", False),\n ClientMethod(\"close_point_in_time\", False),\n ClientMethod(\"count\", True),\n ClientMethod(\"create\", True),\n ClientMethod(\"delete\", True),\n ClientMethod(\"delete_by_query\", True),\n ClientMethod(\"delete_by_query_rethrottle\", False),\n ClientMethod(\"delete_script\", False),\n ClientMethod(\"exists\", True),\n ClientMethod(\"exists_source\", True),\n ClientMethod(\"explain\", True),\n ClientMethod(\"field_caps\", True),\n ClientMethod(\"get\", True),\n ClientMethod(\"get_script\", False),\n ClientMethod(\"get_script_context\", False),\n ClientMethod(\"get_script_languages\", False),\n ClientMethod(\"get_source\", True),\n ClientMethod(\"index\", True),\n ClientMethod(\"info\", False),\n ClientMethod(\"mget\", True),\n ClientMethod(\"msearch\", True),\n ClientMethod(\"msearch_template\", True),\n ClientMethod(\"mtermvectors\", True),\n ClientMethod(\"open_point_in_time\", True),\n ClientMethod(\"ping\", False),\n ClientMethod(\"put_script\", False),\n ClientMethod(\"rank_eval\", True),\n ClientMethod(\"reindex\", False),\n ClientMethod(\"reindex_rethrottle\", False),\n ClientMethod(\"render_search_template\", False),\n ClientMethod(\"scripts_painless_execute\", False),\n ClientMethod(\"scroll\", False),\n ClientMethod(\"search\", True),\n ClientMethod(\"search_shards\", True),\n ClientMethod(\"search_template\", True),\n ClientMethod(\"termvectors\", True),\n ClientMethod(\"update\", True),\n ClientMethod(\"update_by_query\", True),\n ClientMethod(\"update_by_query_rethrottle\", False),\n]\n\n\nhave_patched_client = False\n\n\ndef ensure_client_instrumented():\n global have_patched_client\n\n if not have_patched_client:\n for name, takes_index_argument in CLIENT_METHODS:\n try:\n method = getattr(Elasticsearch, name)\n if takes_index_argument:\n wrapped = wrap_client_index_method(method)\n else:\n wrapped = wrap_client_method(method)\n setattr(Elasticsearch, name, wrapped)\n except Exception as exc:\n logger.warning(\n \"Failed to instrument elasticsearch.Elasticsearch.%s: %r\",\n name,\n exc,\n exc_info=exc,\n )\n\n have_patched_client = True\n\n\[email protected]\ndef wrap_client_index_method(wrapped, instance, args, kwargs):\n # elasticsearch-py 7.5.1 changed the order of arguments for client methods,\n # so to be safe we need to inspect the wrapped method's positional\n # arguments to see if we should pull it from there\n if \"index\" in kwargs:\n index = kwargs[\"index\"]\n else:\n unwrapped = unwrap_decorators(wrapped)\n pos_args = get_pos_args(unwrapped)\n try:\n index_index = pos_args.index(\"index\")\n 
except ValueError: # pragma: no cover\n # This guards against the method not accepting an 'index' argument\n # but they all do - for now\n index = \"\"\n else:\n try:\n index = args[index_index - 1] # subtract 'self'\n except IndexError:\n index = \"\"\n\n if isinstance(index, (list, tuple)):\n index = \",\".join(index)\n if index == \"\":\n index = \"Unknown\"\n index = index.title()\n\n camel_name = \"\".join(c.title() for c in wrapped.__name__.split(\"_\"))\n operation = \"Elasticsearch/{}/{}\".format(index, camel_name)\n tracked_request = TrackedRequest.instance()\n with tracked_request.span(operation=operation, ignore_children=True):\n return wrapped(*args, **kwargs)\n\n\[email protected]\ndef wrap_client_method(wrapped, instance, args, kwargs):\n camel_name = \"\".join(c.title() for c in wrapped.__name__.split(\"_\"))\n operation = \"Elasticsearch/{}\".format(camel_name)\n tracked_request = TrackedRequest.instance()\n with tracked_request.span(operation=operation, ignore_children=True):\n return wrapped(*args, **kwargs)\n\n\nhave_patched_transport = False\n\n\ndef ensure_transport_instrumented():\n global have_patched_transport\n\n if not have_patched_transport:\n try:\n Transport.perform_request = wrapped_perform_request(\n Transport.perform_request\n )\n except Exception as exc:\n logger.warning(\n \"Failed to instrument elasticsearch.Transport.perform_request: %r\",\n exc,\n exc_info=exc,\n )\n\n have_patched_transport = True\n\n\ndef _sanitize_name(name):\n try:\n op = name.split(\"/\")[-1]\n op = op[1:] # chop leading '_' from op\n known_names = (\n \"bench\",\n \"bulk\",\n \"count\",\n \"exists\",\n \"explain\",\n \"field_stats\",\n \"health\",\n \"mget\",\n \"mlt\",\n \"mpercolate\",\n \"msearch\",\n \"mtermvectors\",\n \"percolate\",\n \"query\",\n \"scroll\",\n \"search_shards\",\n \"source\",\n \"suggest\",\n \"template\",\n \"termvectors\",\n \"update\",\n \"search\",\n )\n if op in known_names:\n return op.title()\n return \"Unknown\"\n except Exception:\n return \"Unknown\"\n\n\[email protected]\ndef wrapped_perform_request(wrapped, instance, args, kwargs):\n try:\n op = _sanitize_name(args[1])\n except IndexError:\n op = \"Unknown\"\n\n tracked_request = TrackedRequest.instance()\n with tracked_request.span(\n operation=\"Elasticsearch/{}\".format(op),\n ignore_children=True,\n ):\n return wrapped(*args, **kwargs)\n",
"path": "src/scout_apm/instruments/elasticsearch.py"
}
] | [
{
"content": "# coding=utf-8\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nfrom collections import namedtuple\n\nimport wrapt\n\nfrom scout_apm.compat import get_pos_args, unwrap_decorators\nfrom scout_apm.core.tracked_request import TrackedRequest\n\ntry:\n from elasticsearch import Elasticsearch, Transport\nexcept ImportError: # pragma: no cover\n Elasticsearch = None\n Transport = None\n\nlogger = logging.getLogger(__name__)\n\n\ndef ensure_installed():\n logger.debug(\"Instrumenting elasticsearch.\")\n\n if Elasticsearch is None:\n logger.debug(\n \"Couldn't import elasticsearch.Elasticsearch - probably not installed.\"\n )\n else:\n ensure_client_instrumented()\n ensure_transport_instrumented()\n\n\nClientMethod = namedtuple(\"ClientMethod\", [\"name\", \"takes_index_argument\"])\n\nCLIENT_METHODS = [\n ClientMethod(\"bulk\", True),\n ClientMethod(\"clear_scroll\", False),\n ClientMethod(\"close\", False),\n ClientMethod(\"close_point_in_time\", False),\n ClientMethod(\"count\", True),\n ClientMethod(\"create\", True),\n ClientMethod(\"delete\", True),\n ClientMethod(\"delete_by_query\", True),\n ClientMethod(\"delete_by_query_rethrottle\", False),\n ClientMethod(\"delete_script\", False),\n ClientMethod(\"exists\", True),\n ClientMethod(\"exists_source\", True),\n ClientMethod(\"explain\", True),\n ClientMethod(\"field_caps\", True),\n ClientMethod(\"get\", True),\n ClientMethod(\"get_script\", False),\n ClientMethod(\"get_script_context\", False),\n ClientMethod(\"get_script_languages\", False),\n ClientMethod(\"get_source\", True),\n ClientMethod(\"index\", True),\n ClientMethod(\"info\", False),\n ClientMethod(\"mget\", True),\n ClientMethod(\"msearch\", True),\n ClientMethod(\"msearch_template\", True),\n ClientMethod(\"mtermvectors\", True),\n ClientMethod(\"open_point_in_time\", True),\n ClientMethod(\"ping\", False),\n ClientMethod(\"put_script\", False),\n ClientMethod(\"rank_eval\", True),\n ClientMethod(\"reindex\", False),\n ClientMethod(\"reindex_rethrottle\", False),\n ClientMethod(\"render_search_template\", False),\n ClientMethod(\"scripts_painless_execute\", False),\n ClientMethod(\"scroll\", False),\n ClientMethod(\"search\", True),\n ClientMethod(\"search_shards\", True),\n ClientMethod(\"search_template\", True),\n ClientMethod(\"termvectors\", True),\n ClientMethod(\"terms_enum\", True),\n ClientMethod(\"update\", True),\n ClientMethod(\"update_by_query\", True),\n ClientMethod(\"update_by_query_rethrottle\", False),\n]\n\n\nhave_patched_client = False\n\n\ndef ensure_client_instrumented():\n global have_patched_client\n\n if not have_patched_client:\n for name, takes_index_argument in CLIENT_METHODS:\n try:\n method = getattr(Elasticsearch, name)\n if takes_index_argument:\n wrapped = wrap_client_index_method(method)\n else:\n wrapped = wrap_client_method(method)\n setattr(Elasticsearch, name, wrapped)\n except Exception as exc:\n logger.warning(\n \"Failed to instrument elasticsearch.Elasticsearch.%s: %r\",\n name,\n exc,\n exc_info=exc,\n )\n\n have_patched_client = True\n\n\[email protected]\ndef wrap_client_index_method(wrapped, instance, args, kwargs):\n # elasticsearch-py 7.5.1 changed the order of arguments for client methods,\n # so to be safe we need to inspect the wrapped method's positional\n # arguments to see if we should pull it from there\n if \"index\" in kwargs:\n index = kwargs[\"index\"]\n else:\n unwrapped = unwrap_decorators(wrapped)\n pos_args = get_pos_args(unwrapped)\n try:\n 
index_index = pos_args.index(\"index\")\n except ValueError: # pragma: no cover\n # This guards against the method not accepting an 'index' argument\n # but they all do - for now\n index = \"\"\n else:\n try:\n index = args[index_index - 1] # subtract 'self'\n except IndexError:\n index = \"\"\n\n if isinstance(index, (list, tuple)):\n index = \",\".join(index)\n if index == \"\":\n index = \"Unknown\"\n index = index.title()\n\n camel_name = \"\".join(c.title() for c in wrapped.__name__.split(\"_\"))\n operation = \"Elasticsearch/{}/{}\".format(index, camel_name)\n tracked_request = TrackedRequest.instance()\n with tracked_request.span(operation=operation, ignore_children=True):\n return wrapped(*args, **kwargs)\n\n\[email protected]\ndef wrap_client_method(wrapped, instance, args, kwargs):\n camel_name = \"\".join(c.title() for c in wrapped.__name__.split(\"_\"))\n operation = \"Elasticsearch/{}\".format(camel_name)\n tracked_request = TrackedRequest.instance()\n with tracked_request.span(operation=operation, ignore_children=True):\n return wrapped(*args, **kwargs)\n\n\nhave_patched_transport = False\n\n\ndef ensure_transport_instrumented():\n global have_patched_transport\n\n if not have_patched_transport:\n try:\n Transport.perform_request = wrapped_perform_request(\n Transport.perform_request\n )\n except Exception as exc:\n logger.warning(\n \"Failed to instrument elasticsearch.Transport.perform_request: %r\",\n exc,\n exc_info=exc,\n )\n\n have_patched_transport = True\n\n\ndef _sanitize_name(name):\n try:\n op = name.split(\"/\")[-1]\n op = op[1:] # chop leading '_' from op\n known_names = (\n \"bench\",\n \"bulk\",\n \"count\",\n \"exists\",\n \"explain\",\n \"field_stats\",\n \"health\",\n \"mget\",\n \"mlt\",\n \"mpercolate\",\n \"msearch\",\n \"mtermvectors\",\n \"percolate\",\n \"query\",\n \"scroll\",\n \"search_shards\",\n \"source\",\n \"suggest\",\n \"template\",\n \"termvectors\",\n \"update\",\n \"search\",\n )\n if op in known_names:\n return op.title()\n return \"Unknown\"\n except Exception:\n return \"Unknown\"\n\n\[email protected]\ndef wrapped_perform_request(wrapped, instance, args, kwargs):\n try:\n op = _sanitize_name(args[1])\n except IndexError:\n op = \"Unknown\"\n\n tracked_request = TrackedRequest.instance()\n with tracked_request.span(\n operation=\"Elasticsearch/{}\".format(op),\n ignore_children=True,\n ):\n return wrapped(*args, **kwargs)\n",
"path": "src/scout_apm/instruments/elasticsearch.py"
}
] | diff --git a/CHANGELOG.md b/CHANGELOG.md
index c66144d9..f7b7cc77 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -3,6 +3,8 @@
## Pending
### Added
+- Added `terms_enum` instrumentation for ElasticSearch 7.14.0.
+ ([Issue #671](https://github.com/scoutapp/scout_apm_python/issues/671))
### Fixed
diff --git a/src/scout_apm/instruments/elasticsearch.py b/src/scout_apm/instruments/elasticsearch.py
index bdfd4c46..1cc6a826 100644
--- a/src/scout_apm/instruments/elasticsearch.py
+++ b/src/scout_apm/instruments/elasticsearch.py
@@ -71,6 +71,7 @@ def ensure_installed():
ClientMethod("search_shards", True),
ClientMethod("search_template", True),
ClientMethod("termvectors", True),
+ ClientMethod("terms_enum", True),
ClientMethod("update", True),
ClientMethod("update_by_query", True),
ClientMethod("update_by_query_rethrottle", False),
| Support Elasticsearch 7.14
The Python package `elasticsearch-py` added the `terms_enum` client method in Elasticsearch 7.14. It is currently not instrumented, which breaks the tests.
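For context, the instrumentation above works by listing each Elasticsearch client method and monkey-patching it with a `wrapt` wrapper, so supporting 7.14 is just one more `ClientMethod` entry. A condensed, self-contained sketch of that pattern (using a hypothetical `FakeClient` class instead of the real `elasticsearch.Elasticsearch`, and a `print` in place of `TrackedRequest.span`):
```python
import wrapt

class FakeClient:
    """Stand-in for elasticsearch.Elasticsearch."""
    def terms_enum(self, index=None, body=None):
        return {"index": index, "terms": []}

@wrapt.decorator
def trace_method(wrapped, instance, args, kwargs):
    # The real code opens a TrackedRequest span named like "Elasticsearch/<Index>/<Method>".
    camel = "".join(part.title() for part in wrapped.__name__.split("_"))
    print("span: Elasticsearch/%s" % camel)
    return wrapped(*args, **kwargs)

# Adding support for a new client method is just another name to patch:
for name in ["terms_enum"]:
    setattr(FakeClient, name, trace_method(getattr(FakeClient, name)))

FakeClient().terms_enum(index="books")  # prints: span: Elasticsearch/TermsEnum
```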
|
arviz-devs__arviz-1043 | [
{
"content": "\"\"\"PyMC3-specific conversion code.\"\"\"\nimport logging\nfrom typing import Dict, List, Any, Optional, TYPE_CHECKING\nfrom types import ModuleType\n\nimport numpy as np\nimport xarray as xr\nfrom .. import utils\nfrom .inference_data import InferenceData, concat\nfrom .base import requires, dict_to_dataset, generate_dims_coords, make_attrs\n\nif TYPE_CHECKING:\n import pymc3 as pm\n from pymc3 import MultiTrace, Model # pylint: disable=invalid-name\n import theano\n from typing import Set # pylint: disable=ungrouped-imports\nelse:\n MultiTrace = Any # pylint: disable=invalid-name\n Model = Any # pylint: disable=invalid-name\n\n___all__ = [\"\"]\n\n_log = logging.getLogger(__name__)\n\nCoords = Dict[str, List[Any]]\nDims = Dict[str, List[str]]\n# random variable object ...\nVar = Any # pylint: disable=invalid-name\n\n\ndef _monkey_patch_pymc3(pm: ModuleType) -> None: # pylint: disable=invalid-name\n assert pm.__name__ == \"pymc3\"\n\n def fixed_eq(self, other):\n \"\"\"Use object identity for MultiObservedRV equality.\"\"\"\n return self is other\n\n if tuple([int(x) for x in pm.__version__.split(\".\")]) < (3, 9): # type: ignore\n pm.model.MultiObservedRV.__eq__ = fixed_eq # type: ignore\n\n\nclass PyMC3Converter: # pylint: disable=too-many-instance-attributes\n \"\"\"Encapsulate PyMC3 specific logic.\"\"\"\n\n model = None # type: Optional[pm.Model]\n nchains = None # type: int\n ndraws = None # type: int\n posterior_predictive = None # Type: Optional[Dict[str, np.ndarray]]\n predictions = None # Type: Optional[Dict[str, np.ndarray]]\n prior = None # Type: Optional[Dict[str, np.ndarray]]\n\n def __init__(\n self,\n *,\n trace=None,\n prior=None,\n posterior_predictive=None,\n predictions=None,\n coords: Optional[Coords] = None,\n dims: Optional[Dims] = None,\n model=None\n ):\n import pymc3\n import theano\n\n _monkey_patch_pymc3(pymc3)\n\n self.pymc3 = pymc3\n self.theano = theano\n\n self.trace = trace\n\n # this permits us to get the model from command-line argument or from with model:\n try:\n self.model = self.pymc3.modelcontext(model or self.model)\n except TypeError:\n self.model = None\n\n # This next line is brittle and may not work forever, but is a secret\n # way to access the model from the trace.\n if trace is not None:\n if self.model is None:\n self.model = self.trace._straces[0].model # pylint: disable=protected-access\n self.nchains = trace.nchains if hasattr(trace, \"nchains\") else 1\n self.ndraws = len(trace)\n else:\n self.nchains = self.ndraws = 0\n\n self.prior = prior\n self.posterior_predictive = posterior_predictive\n self.predictions = predictions\n\n def arbitrary_element(dct: Dict[Any, np.ndarray]) -> np.ndarray:\n return next(iter(dct.values()))\n\n if trace is None:\n # if you have a posterior_predictive built with keep_dims,\n # you'll lose here, but there's nothing I can do about that.\n self.nchains = 1\n get_from = None\n if predictions is not None:\n get_from = predictions\n elif posterior_predictive is not None:\n get_from = posterior_predictive\n elif prior is not None:\n get_from = prior\n if get_from is None:\n # pylint: disable=line-too-long\n raise ValueError(\n \"\"\"When constructing InferenceData must have at least\n one of trace, prior, posterior_predictive or predictions.\"\"\"\n )\n\n aelem = arbitrary_element(get_from)\n self.ndraws = aelem.shape[0]\n\n self.coords = coords\n self.dims = dims\n self.observations = self.find_observations()\n\n def find_observations(self) -> Optional[Dict[str, Var]]:\n \"\"\"If there are 
observations available, return them as a dictionary.\"\"\"\n has_observations = False\n if self.trace is not None:\n assert self.model is not None, \"Cannot identify observations without PymC3 model\"\n if any((hasattr(obs, \"observations\") for obs in self.model.observed_RVs)):\n has_observations = True\n if has_observations:\n assert self.model is not None\n return {obs.name: obs.observations for obs in self.model.observed_RVs}\n return None\n\n def log_likelihood_vals_point(self, point, var, log_like_fun):\n \"\"\"Compute log likelihood for each observed point.\"\"\"\n log_like_val = utils.one_de(log_like_fun(point))\n if var.missing_values:\n log_like_val = np.where(var.observations.mask, np.nan, log_like_val)\n return log_like_val\n\n @requires(\"trace\")\n @requires(\"model\")\n def _extract_log_likelihood(self):\n \"\"\"Compute log likelihood of each observation.\"\"\"\n # If we have predictions, then we have a thinned trace which does not\n # support extracting a log likelihood.\n cached = [(var, var.logp_elemwise) for var in self.model.observed_RVs]\n log_likelihood_dict = {}\n for var, log_like_fun in cached:\n chain_likelihoods = []\n for chain in self.trace.chains:\n log_like_chain = [\n self.log_likelihood_vals_point(point, var, log_like_fun)\n for point in self.trace.points([chain])\n ]\n chain_likelihoods.append(np.stack(log_like_chain))\n log_likelihood_dict[var.name] = np.stack(chain_likelihoods)\n return log_likelihood_dict\n\n @requires(\"trace\")\n def posterior_to_xarray(self):\n \"\"\"Convert the posterior to an xarray dataset.\"\"\"\n var_names = self.pymc3.util.get_default_varnames( # pylint: disable=no-member\n self.trace.varnames, include_transformed=False\n )\n data = {}\n for var_name in var_names:\n data[var_name] = np.array(self.trace.get_values(var_name, combine=False, squeeze=False))\n return dict_to_dataset(data, library=self.pymc3, coords=self.coords, dims=self.dims)\n\n @requires(\"trace\")\n def sample_stats_to_xarray(self):\n \"\"\"Extract sample_stats from PyMC3 trace.\"\"\"\n data = {}\n rename_key = {\"model_logp\": \"lp\"}\n data = {}\n for stat in self.trace.stat_names:\n name = rename_key.get(stat, stat)\n data[name] = np.array(self.trace.get_sampler_stats(stat, combine=False))\n\n return dict_to_dataset(data, library=self.pymc3, dims=None, coords=self.coords)\n\n @requires(\"trace\")\n @requires(\"model\")\n def log_likelihood_to_xarray(self):\n \"\"\"Extract log likelihood and log_p data from PyMC3 trace.\"\"\"\n if self.predictions:\n return None\n data = self._extract_log_likelihood()\n return dict_to_dataset(data, library=self.pymc3, dims=self.dims, coords=self.coords)\n\n def translate_posterior_predictive_dict_to_xarray(self, dct) -> xr.Dataset:\n \"\"\"Take Dict of variables to numpy ndarrays (samples) and translate into dataset.\"\"\"\n data = {}\n for k, ary in dct.items():\n shape = ary.shape\n if shape[0] == self.nchains and shape[1] == self.ndraws:\n data[k] = ary\n elif shape[0] == self.nchains * self.ndraws:\n data[k] = ary.reshape((self.nchains, self.ndraws, *shape[1:]))\n else:\n data[k] = utils.expand_dims(ary)\n # pylint: disable=line-too-long\n _log.warning(\n \"posterior predictive variable %s's shape not compatible with number of chains and draws. 
\"\n \"This can mean that some draws or even whole chains are not represented.\",\n k,\n )\n return dict_to_dataset(data, library=self.pymc3, coords=self.coords, dims=self.dims)\n\n @requires([\"posterior_predictive\"])\n def posterior_predictive_to_xarray(self):\n \"\"\"Convert posterior_predictive samples to xarray.\"\"\"\n return self.translate_posterior_predictive_dict_to_xarray(self.posterior_predictive)\n\n @requires([\"predictions\"])\n def predictions_to_xarray(self):\n \"\"\"Convert predictions (out of sample predictions) to xarray.\"\"\"\n return self.translate_posterior_predictive_dict_to_xarray(self.predictions)\n\n def priors_to_xarray(self):\n \"\"\"Convert prior samples (and if possible prior predictive too) to xarray.\"\"\"\n if self.prior is None:\n return {\"prior\": None, \"prior_predictive\": None}\n if self.trace is not None:\n prior_vars = self.pymc3.util.get_default_varnames( # pylint: disable=no-member\n self.trace.varnames, include_transformed=False\n )\n prior_predictive_vars = [key for key in self.prior.keys() if key not in prior_vars]\n else:\n prior_vars = list(self.prior.keys())\n prior_predictive_vars = None\n\n priors_dict = {}\n for group, var_names in zip(\n (\"prior\", \"prior_predictive\"), (prior_vars, prior_predictive_vars)\n ):\n priors_dict[group] = (\n None\n if var_names is None\n else dict_to_dataset(\n {k: utils.expand_dims(self.prior[k]) for k in var_names},\n library=self.pymc3,\n coords=self.coords,\n dims=self.dims,\n )\n )\n return priors_dict\n\n @requires(\"observations\")\n @requires(\"model\")\n def observed_data_to_xarray(self):\n \"\"\"Convert observed data to xarray.\"\"\"\n if self.dims is None:\n dims = {}\n else:\n dims = self.dims\n observed_data = {}\n for name, vals in self.observations.items():\n if hasattr(vals, \"get_value\"):\n vals = vals.get_value()\n vals = utils.one_de(vals)\n val_dims = dims.get(name)\n val_dims, coords = generate_dims_coords(\n vals.shape, name, dims=val_dims, coords=self.coords\n )\n # filter coords based on the dims\n coords = {key: xr.IndexVariable((key,), data=coords[key]) for key in val_dims}\n observed_data[name] = xr.DataArray(vals, dims=val_dims, coords=coords)\n return xr.Dataset(data_vars=observed_data, attrs=make_attrs(library=self.pymc3))\n\n @requires([\"trace\", \"predictions\"])\n @requires(\"model\")\n def constant_data_to_xarray(self):\n \"\"\"Convert constant data to xarray.\"\"\"\n # For constant data, we are concerned only with deterministics and data.\n # The constant data vars must be either pm.Data (TensorSharedVariable) or pm.Deterministic\n constant_data_vars = {} # type: Dict[str, Var]\n for var in self.model.deterministics:\n ancestors = self.theano.tensor.gof.graph.ancestors(var.owner.inputs)\n # no dependency on a random variable\n if not any((isinstance(a, self.pymc3.model.PyMC3Variable) for a in ancestors)):\n constant_data_vars[var.name] = var\n\n def is_data(name, var) -> bool:\n assert self.model is not None\n return (\n var not in self.model.deterministics\n and var not in self.model.observed_RVs\n and var not in self.model.free_RVs\n and (self.observations is None or name not in self.observations)\n )\n\n # I don't know how to find pm.Data, except that they are named variables that aren't\n # observed or free RVs, nor are they deterministics, and then we eliminate observations.\n for name, var in self.model.named_vars.items():\n if is_data(name, var):\n constant_data_vars[name] = var\n\n if not constant_data_vars:\n return None\n if self.dims is None:\n dims = {}\n 
else:\n dims = self.dims\n constant_data = {}\n for name, vals in constant_data_vars.items():\n if hasattr(vals, \"get_value\"):\n vals = vals.get_value()\n # this might be a Deterministic, and must be evaluated\n elif hasattr(self.model[name], \"eval\"):\n vals = self.model[name].eval()\n vals = np.atleast_1d(vals)\n val_dims = dims.get(name)\n val_dims, coords = generate_dims_coords(\n vals.shape, name, dims=val_dims, coords=self.coords\n )\n # filter coords based on the dims\n coords = {key: xr.IndexVariable((key,), data=coords[key]) for key in val_dims}\n try:\n constant_data[name] = xr.DataArray(vals, dims=val_dims, coords=coords)\n except ValueError as e: # pylint: disable=invalid-name\n raise ValueError(\"Error translating constant_data variable %s: %s\" % (name, e))\n return xr.Dataset(data_vars=constant_data, attrs=make_attrs(library=self.pymc3))\n\n def to_inference_data(self):\n \"\"\"Convert all available data to an InferenceData object.\n\n Note that if groups can not be created (e.g., there is no `trace`, so\n the `posterior` and `sample_stats` can not be extracted), then the InferenceData\n will not have those groups.\n \"\"\"\n id_dict = {\n \"posterior\": self.posterior_to_xarray(),\n \"sample_stats\": self.sample_stats_to_xarray(),\n \"log_likelihood\": self.log_likelihood_to_xarray(),\n \"posterior_predictive\": self.posterior_predictive_to_xarray(),\n \"predictions\": self.predictions_to_xarray(),\n **self.priors_to_xarray(),\n \"observed_data\": self.observed_data_to_xarray(),\n }\n if self.predictions:\n id_dict[\"predictions_constant_data\"] = self.constant_data_to_xarray()\n else:\n id_dict[\"constant_data\"] = self.constant_data_to_xarray()\n return InferenceData(**id_dict)\n\n\ndef from_pymc3(\n trace=None, *, prior=None, posterior_predictive=None, coords=None, dims=None, model=None\n):\n \"\"\"Convert pymc3 data into an InferenceData object.\"\"\"\n return PyMC3Converter(\n trace=trace,\n prior=prior,\n posterior_predictive=posterior_predictive,\n coords=coords,\n dims=dims,\n model=model,\n ).to_inference_data()\n\n\n### Later I could have this return ``None`` if the ``idata_orig`` argument is supplied. But\n### perhaps we should have an inplace argument?\ndef from_pymc3_predictions(\n predictions,\n posterior_trace: Optional[MultiTrace] = None,\n model: Optional[Model] = None,\n coords=None,\n dims=None,\n idata_orig: Optional[InferenceData] = None,\n inplace: bool = False,\n) -> InferenceData:\n \"\"\"Translate out-of-sample predictions into ``InferenceData``.\n\n Parameters\n ----------\n predictions: Dict[str, np.ndarray]\n The predictions are the return value of ``pymc3.sample_posterior_predictive``,\n a dictionary of strings (variable names) to numpy ndarrays (draws).\n posterior_trace: pm.MultiTrace\n This should be a trace that has been thinned appropriately for\n ``pymc3.sample_posterior_predictive``. Specifically, any variable whose shape is\n a deterministic function of the shape of any predictor (explanatory, independent, etc.)\n variables must be *removed* from this trace.\n model: pymc3.Model\n This argument is *not* optional, unlike in conventional uses of ``from_pymc3``.\n The reason is that the posterior_trace argument is likely to supply an incorrect\n value of model.\n coords: Dict[str, array-like[Any]]\n Coordinates for the variables. 
Map from coordinate names to coordinate values.\n dims: Dict[str, array-like[str]]\n Map from variable name to ordered set of coordinate names.\n idata_orig: InferenceData, optional\n If supplied, then modify this inference data in place, adding ``predictions`` and\n (if available) ``predictions_constant_data`` groups. If this is not supplied, make a\n fresh InferenceData\n inplace: boolean, optional\n If idata_orig is supplied and inplace is True, merge the predictions into idata_orig,\n rather than returning a fresh InferenceData object.\n\n Returns\n -------\n InferenceData:\n May be modified ``idata_orig``.\n \"\"\"\n if inplace and not idata_orig:\n raise ValueError(\n (\n \"Do not pass True for inplace unless passing\"\n \"an existing InferenceData as idata_orig\"\n )\n )\n new_idata = PyMC3Converter(\n trace=posterior_trace, predictions=predictions, model=model, coords=coords, dims=dims\n ).to_inference_data()\n if idata_orig is None:\n return new_idata\n elif inplace:\n concat([idata_orig, new_idata], dim=None, inplace=True)\n return idata_orig\n else:\n # if we are not returning in place, then merge the old groups into the new inference\n # data and return that.\n concat([new_idata, idata_orig], dim=None, copy=True, inplace=True)\n return new_idata\n",
"path": "arviz/data/io_pymc3.py"
}
] | [
{
"content": "\"\"\"PyMC3-specific conversion code.\"\"\"\nimport logging\nfrom typing import Dict, List, Any, Optional, TYPE_CHECKING\nfrom types import ModuleType\n\nimport numpy as np\nimport xarray as xr\nfrom .. import utils\nfrom .inference_data import InferenceData, concat\nfrom .base import requires, dict_to_dataset, generate_dims_coords, make_attrs\n\nif TYPE_CHECKING:\n import pymc3 as pm\n from pymc3 import MultiTrace, Model # pylint: disable=invalid-name\n import theano\n from typing import Set # pylint: disable=ungrouped-imports\nelse:\n MultiTrace = Any # pylint: disable=invalid-name\n Model = Any # pylint: disable=invalid-name\n\n___all__ = [\"\"]\n\n_log = logging.getLogger(__name__)\n\nCoords = Dict[str, List[Any]]\nDims = Dict[str, List[str]]\n# random variable object ...\nVar = Any # pylint: disable=invalid-name\n\n\ndef _monkey_patch_pymc3(pm: ModuleType) -> None: # pylint: disable=invalid-name\n assert pm.__name__ == \"pymc3\"\n\n def fixed_eq(self, other):\n \"\"\"Use object identity for MultiObservedRV equality.\"\"\"\n return self is other\n\n if tuple([int(x) for x in pm.__version__.split(\".\")]) < (3, 9): # type: ignore\n pm.model.MultiObservedRV.__eq__ = fixed_eq # type: ignore\n\n\nclass PyMC3Converter: # pylint: disable=too-many-instance-attributes\n \"\"\"Encapsulate PyMC3 specific logic.\"\"\"\n\n model = None # type: Optional[pm.Model]\n nchains = None # type: int\n ndraws = None # type: int\n posterior_predictive = None # Type: Optional[Dict[str, np.ndarray]]\n predictions = None # Type: Optional[Dict[str, np.ndarray]]\n prior = None # Type: Optional[Dict[str, np.ndarray]]\n\n def __init__(\n self,\n *,\n trace=None,\n prior=None,\n posterior_predictive=None,\n predictions=None,\n coords: Optional[Coords] = None,\n dims: Optional[Dims] = None,\n model=None\n ):\n import pymc3\n import theano\n\n _monkey_patch_pymc3(pymc3)\n\n self.pymc3 = pymc3\n self.theano = theano\n\n self.trace = trace\n\n # this permits us to get the model from command-line argument or from with model:\n try:\n self.model = self.pymc3.modelcontext(model or self.model)\n except TypeError:\n self.model = None\n\n # This next line is brittle and may not work forever, but is a secret\n # way to access the model from the trace.\n if trace is not None:\n if self.model is None:\n self.model = self.trace._straces[0].model # pylint: disable=protected-access\n self.nchains = trace.nchains if hasattr(trace, \"nchains\") else 1\n self.ndraws = len(trace)\n else:\n self.nchains = self.ndraws = 0\n\n self.prior = prior\n self.posterior_predictive = posterior_predictive\n self.predictions = predictions\n\n def arbitrary_element(dct: Dict[Any, np.ndarray]) -> np.ndarray:\n return next(iter(dct.values()))\n\n if trace is None:\n # if you have a posterior_predictive built with keep_dims,\n # you'll lose here, but there's nothing I can do about that.\n self.nchains = 1\n get_from = None\n if predictions is not None:\n get_from = predictions\n elif posterior_predictive is not None:\n get_from = posterior_predictive\n elif prior is not None:\n get_from = prior\n if get_from is None:\n # pylint: disable=line-too-long\n raise ValueError(\n \"\"\"When constructing InferenceData must have at least\n one of trace, prior, posterior_predictive or predictions.\"\"\"\n )\n\n aelem = arbitrary_element(get_from)\n self.ndraws = aelem.shape[0]\n\n self.coords = coords\n self.dims = dims\n self.observations = self.find_observations()\n\n def find_observations(self) -> Optional[Dict[str, Var]]:\n \"\"\"If there are 
observations available, return them as a dictionary.\"\"\"\n has_observations = False\n if self.trace is not None:\n assert self.model is not None, \"Cannot identify observations without PymC3 model\"\n if any((hasattr(obs, \"observations\") for obs in self.model.observed_RVs)):\n has_observations = True\n if has_observations:\n assert self.model is not None\n return {obs.name: obs.observations for obs in self.model.observed_RVs}\n return None\n\n def log_likelihood_vals_point(self, point, var, log_like_fun):\n \"\"\"Compute log likelihood for each observed point.\"\"\"\n log_like_val = utils.one_de(log_like_fun(point))\n if var.missing_values:\n log_like_val = np.where(var.observations.mask, np.nan, log_like_val)\n return log_like_val\n\n @requires(\"trace\")\n @requires(\"model\")\n def _extract_log_likelihood(self):\n \"\"\"Compute log likelihood of each observation.\"\"\"\n # If we have predictions, then we have a thinned trace which does not\n # support extracting a log likelihood.\n cached = [(var, var.logp_elemwise) for var in self.model.observed_RVs]\n log_likelihood_dict = {}\n for var, log_like_fun in cached:\n chain_likelihoods = []\n for chain in self.trace.chains:\n log_like_chain = [\n self.log_likelihood_vals_point(point, var, log_like_fun)\n for point in self.trace.points([chain])\n ]\n chain_likelihoods.append(np.stack(log_like_chain))\n log_likelihood_dict[var.name] = np.stack(chain_likelihoods)\n return log_likelihood_dict\n\n @requires(\"trace\")\n def posterior_to_xarray(self):\n \"\"\"Convert the posterior to an xarray dataset.\"\"\"\n var_names = self.pymc3.util.get_default_varnames( # pylint: disable=no-member\n self.trace.varnames, include_transformed=False\n )\n data = {}\n for var_name in var_names:\n data[var_name] = np.array(self.trace.get_values(var_name, combine=False, squeeze=False))\n return dict_to_dataset(data, library=self.pymc3, coords=self.coords, dims=self.dims)\n\n @requires(\"trace\")\n def sample_stats_to_xarray(self):\n \"\"\"Extract sample_stats from PyMC3 trace.\"\"\"\n data = {}\n rename_key = {\"model_logp\": \"lp\"}\n data = {}\n for stat in self.trace.stat_names:\n name = rename_key.get(stat, stat)\n data[name] = np.array(self.trace.get_sampler_stats(stat, combine=False))\n\n return dict_to_dataset(data, library=self.pymc3, dims=None, coords=self.coords)\n\n @requires(\"trace\")\n @requires(\"model\")\n def log_likelihood_to_xarray(self):\n \"\"\"Extract log likelihood and log_p data from PyMC3 trace.\"\"\"\n if self.predictions:\n return None\n data = self._extract_log_likelihood()\n return dict_to_dataset(data, library=self.pymc3, dims=self.dims, coords=self.coords)\n\n def translate_posterior_predictive_dict_to_xarray(self, dct) -> xr.Dataset:\n \"\"\"Take Dict of variables to numpy ndarrays (samples) and translate into dataset.\"\"\"\n data = {}\n for k, ary in dct.items():\n shape = ary.shape\n if shape[0] == self.nchains and shape[1] == self.ndraws:\n data[k] = ary\n elif shape[0] == self.nchains * self.ndraws:\n data[k] = ary.reshape((self.nchains, self.ndraws, *shape[1:]))\n else:\n data[k] = utils.expand_dims(ary)\n # pylint: disable=line-too-long\n _log.warning(\n \"posterior predictive variable %s's shape not compatible with number of chains and draws. 
\"\n \"This can mean that some draws or even whole chains are not represented.\",\n k,\n )\n return dict_to_dataset(data, library=self.pymc3, coords=self.coords, dims=self.dims)\n\n @requires([\"posterior_predictive\"])\n def posterior_predictive_to_xarray(self):\n \"\"\"Convert posterior_predictive samples to xarray.\"\"\"\n return self.translate_posterior_predictive_dict_to_xarray(self.posterior_predictive)\n\n @requires([\"predictions\"])\n def predictions_to_xarray(self):\n \"\"\"Convert predictions (out of sample predictions) to xarray.\"\"\"\n return self.translate_posterior_predictive_dict_to_xarray(self.predictions)\n\n def priors_to_xarray(self):\n \"\"\"Convert prior samples (and if possible prior predictive too) to xarray.\"\"\"\n if self.prior is None:\n return {\"prior\": None, \"prior_predictive\": None}\n if self.trace is not None:\n prior_vars = self.pymc3.util.get_default_varnames( # pylint: disable=no-member\n self.trace.varnames, include_transformed=False\n )\n prior_predictive_vars = [key for key in self.prior.keys() if key not in prior_vars]\n else:\n prior_vars = list(self.prior.keys())\n prior_predictive_vars = None\n\n priors_dict = {}\n for group, var_names in zip(\n (\"prior\", \"prior_predictive\"), (prior_vars, prior_predictive_vars)\n ):\n priors_dict[group] = (\n None\n if var_names is None\n else dict_to_dataset(\n {k: utils.expand_dims(self.prior[k]) for k in var_names},\n library=self.pymc3,\n coords=self.coords,\n dims=self.dims,\n )\n )\n return priors_dict\n\n @requires(\"observations\")\n @requires(\"model\")\n def observed_data_to_xarray(self):\n \"\"\"Convert observed data to xarray.\"\"\"\n if self.dims is None:\n dims = {}\n else:\n dims = self.dims\n observed_data = {}\n for name, vals in self.observations.items():\n if hasattr(vals, \"get_value\"):\n vals = vals.get_value()\n vals = utils.one_de(vals)\n val_dims = dims.get(name)\n val_dims, coords = generate_dims_coords(\n vals.shape, name, dims=val_dims, coords=self.coords\n )\n # filter coords based on the dims\n coords = {key: xr.IndexVariable((key,), data=coords[key]) for key in val_dims}\n observed_data[name] = xr.DataArray(vals, dims=val_dims, coords=coords)\n return xr.Dataset(data_vars=observed_data, attrs=make_attrs(library=self.pymc3))\n\n @requires([\"trace\", \"predictions\"])\n @requires(\"model\")\n def constant_data_to_xarray(self):\n \"\"\"Convert constant data to xarray.\"\"\"\n # For constant data, we are concerned only with deterministics and data.\n # The constant data vars must be either pm.Data (TensorSharedVariable) or pm.Deterministic\n constant_data_vars = {} # type: Dict[str, Var]\n for var in self.model.deterministics:\n ancestors = self.theano.tensor.gof.graph.ancestors(var.owner.inputs)\n # no dependency on a random variable\n if not any((isinstance(a, self.pymc3.model.PyMC3Variable) for a in ancestors)):\n constant_data_vars[var.name] = var\n\n def is_data(name, var) -> bool:\n assert self.model is not None\n return (\n var not in self.model.deterministics\n and var not in self.model.observed_RVs\n and var not in self.model.free_RVs\n and var not in self.model.potentials\n and (self.observations is None or name not in self.observations)\n )\n\n # I don't know how to find pm.Data, except that they are named variables that aren't\n # observed or free RVs, nor are they deterministics, and then we eliminate observations.\n for name, var in self.model.named_vars.items():\n if is_data(name, var):\n constant_data_vars[name] = var\n\n if not constant_data_vars:\n return 
None\n if self.dims is None:\n dims = {}\n else:\n dims = self.dims\n constant_data = {}\n for name, vals in constant_data_vars.items():\n if hasattr(vals, \"get_value\"):\n vals = vals.get_value()\n # this might be a Deterministic, and must be evaluated\n elif hasattr(self.model[name], \"eval\"):\n vals = self.model[name].eval()\n vals = np.atleast_1d(vals)\n val_dims = dims.get(name)\n val_dims, coords = generate_dims_coords(\n vals.shape, name, dims=val_dims, coords=self.coords\n )\n # filter coords based on the dims\n coords = {key: xr.IndexVariable((key,), data=coords[key]) for key in val_dims}\n try:\n constant_data[name] = xr.DataArray(vals, dims=val_dims, coords=coords)\n except ValueError as e: # pylint: disable=invalid-name\n raise ValueError(\"Error translating constant_data variable %s: %s\" % (name, e))\n return xr.Dataset(data_vars=constant_data, attrs=make_attrs(library=self.pymc3))\n\n def to_inference_data(self):\n \"\"\"Convert all available data to an InferenceData object.\n\n Note that if groups can not be created (e.g., there is no `trace`, so\n the `posterior` and `sample_stats` can not be extracted), then the InferenceData\n will not have those groups.\n \"\"\"\n id_dict = {\n \"posterior\": self.posterior_to_xarray(),\n \"sample_stats\": self.sample_stats_to_xarray(),\n \"log_likelihood\": self.log_likelihood_to_xarray(),\n \"posterior_predictive\": self.posterior_predictive_to_xarray(),\n \"predictions\": self.predictions_to_xarray(),\n **self.priors_to_xarray(),\n \"observed_data\": self.observed_data_to_xarray(),\n }\n if self.predictions:\n id_dict[\"predictions_constant_data\"] = self.constant_data_to_xarray()\n else:\n id_dict[\"constant_data\"] = self.constant_data_to_xarray()\n return InferenceData(**id_dict)\n\n\ndef from_pymc3(\n trace=None, *, prior=None, posterior_predictive=None, coords=None, dims=None, model=None\n):\n \"\"\"Convert pymc3 data into an InferenceData object.\"\"\"\n return PyMC3Converter(\n trace=trace,\n prior=prior,\n posterior_predictive=posterior_predictive,\n coords=coords,\n dims=dims,\n model=model,\n ).to_inference_data()\n\n\n### Later I could have this return ``None`` if the ``idata_orig`` argument is supplied. But\n### perhaps we should have an inplace argument?\ndef from_pymc3_predictions(\n predictions,\n posterior_trace: Optional[MultiTrace] = None,\n model: Optional[Model] = None,\n coords=None,\n dims=None,\n idata_orig: Optional[InferenceData] = None,\n inplace: bool = False,\n) -> InferenceData:\n \"\"\"Translate out-of-sample predictions into ``InferenceData``.\n\n Parameters\n ----------\n predictions: Dict[str, np.ndarray]\n The predictions are the return value of ``pymc3.sample_posterior_predictive``,\n a dictionary of strings (variable names) to numpy ndarrays (draws).\n posterior_trace: pm.MultiTrace\n This should be a trace that has been thinned appropriately for\n ``pymc3.sample_posterior_predictive``. Specifically, any variable whose shape is\n a deterministic function of the shape of any predictor (explanatory, independent, etc.)\n variables must be *removed* from this trace.\n model: pymc3.Model\n This argument is *not* optional, unlike in conventional uses of ``from_pymc3``.\n The reason is that the posterior_trace argument is likely to supply an incorrect\n value of model.\n coords: Dict[str, array-like[Any]]\n Coordinates for the variables. 
Map from coordinate names to coordinate values.\n dims: Dict[str, array-like[str]]\n Map from variable name to ordered set of coordinate names.\n idata_orig: InferenceData, optional\n If supplied, then modify this inference data in place, adding ``predictions`` and\n (if available) ``predictions_constant_data`` groups. If this is not supplied, make a\n fresh InferenceData\n inplace: boolean, optional\n If idata_orig is supplied and inplace is True, merge the predictions into idata_orig,\n rather than returning a fresh InferenceData object.\n\n Returns\n -------\n InferenceData:\n May be modified ``idata_orig``.\n \"\"\"\n if inplace and not idata_orig:\n raise ValueError(\n (\n \"Do not pass True for inplace unless passing\"\n \"an existing InferenceData as idata_orig\"\n )\n )\n new_idata = PyMC3Converter(\n trace=posterior_trace, predictions=predictions, model=model, coords=coords, dims=dims\n ).to_inference_data()\n if idata_orig is None:\n return new_idata\n elif inplace:\n concat([idata_orig, new_idata], dim=None, inplace=True)\n return idata_orig\n else:\n # if we are not returning in place, then merge the old groups into the new inference\n # data and return that.\n concat([new_idata, idata_orig], dim=None, copy=True, inplace=True)\n return new_idata\n",
"path": "arviz/data/io_pymc3.py"
}
] | diff --git a/arviz/data/io_pymc3.py b/arviz/data/io_pymc3.py
index d0013f4905..f09c4e6079 100644
--- a/arviz/data/io_pymc3.py
+++ b/arviz/data/io_pymc3.py
@@ -287,6 +287,7 @@ def is_data(name, var) -> bool:
var not in self.model.deterministics
and var not in self.model.observed_RVs
and var not in self.model.free_RVs
+ and var not in self.model.potentials
and (self.observations is None or name not in self.observations)
)
diff --git a/arviz/tests/test_data_pymc.py b/arviz/tests/test_data_pymc.py
index 6851d85824..62e907b7b9 100644
--- a/arviz/tests/test_data_pymc.py
+++ b/arviz/tests/test_data_pymc.py
@@ -251,6 +251,15 @@ def test_single_observation(self):
inference_data = from_pymc3(trace=trace)
assert inference_data
+ def test_potential(self):
+ with pm.Model():
+ x = pm.Normal("x", 0.0, 1.0)
+ pm.Potential("z", pm.Normal.dist(x, 1.0).logp(np.random.randn(10)))
+ trace = pm.sample(100, chains=2)
+
+ inference_data = from_pymc3(trace=trace)
+ assert inference_data
+
def test_constant_data(self):
with pm.Model():
x = pm.Data("x", [1.0, 2.0, 3.0])
| Error with PyMC3 model that contains Potential
**Describe the bug**
For PyMC3 model that contains Potential, io_pymc3 is attempting to call `eval()` without graph dependence.
**To Reproduce**
```python
with pm.Model() as m:
x = pm.Normal('x', 0., 1.)
pm.Potential('z', pm.Normal.dist(x, 1.).logp(np.random.randn(10)))
trace = pm.sample()
```
returns:
```python
---------------------------------------------------------------------------
MissingInputError Traceback (most recent call last)
<ipython-input-45-c2e72dd27111> in <module>
2 x = pm.Normal('x', 0., 1.)
3 pm.Potential('z', pm.Normal.dist(x, 1.).logp(np.random.randn(10)))
----> 4 trace = pm.sample()
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/pymc3-3.8-py3.8.egg/pymc3/sampling.py in sample(draws, step, init, n_init, start, trace, chain_idx, chains, cores, tune, progressbar, model, random_seed, discard_tuned_samples, compute_convergence_checks, callback, **kwargs)
539 warnings.warn("The number of samples is too small to check convergence reliably.")
540 else:
--> 541 trace.report._run_convergence_checks(trace, model)
542
543 trace.report._log_summary()
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/pymc3-3.8-py3.8.egg/pymc3/backends/report.py in _run_convergence_checks(self, trace, model)
96 varnames.append(rv_name)
97
---> 98 self._ess = ess = ess(trace, var_names=varnames)
99 self._rhat = rhat = rhat(trace, var_names=varnames)
100
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/pymc3-3.8-py3.8.egg/pymc3/stats/__init__.py in wrapped(*args, **kwargs)
36 )
37 kwargs[new] = kwargs.pop(old)
---> 38 return func(*args, **kwargs)
39
40 return wrapped
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/arviz-0.6.1-py3.8.egg/arviz/stats/diagnostics.py in ess(data, var_names, method, relative, prob)
187 raise TypeError(msg)
188
--> 189 dataset = convert_to_dataset(data, group="posterior")
190 var_names = _var_names(var_names, dataset)
191
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/arviz-0.6.1-py3.8.egg/arviz/data/converters.py in convert_to_dataset(obj, group, coords, dims)
166 xarray.Dataset
167 """
--> 168 inference_data = convert_to_inference_data(obj, group=group, coords=coords, dims=dims)
169 dataset = getattr(inference_data, group, None)
170 if dataset is None:
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/arviz-0.6.1-py3.8.egg/arviz/data/converters.py in convert_to_inference_data(obj, group, coords, dims, **kwargs)
87 return from_pystan(**kwargs)
88 elif obj.__class__.__name__ == "MultiTrace": # ugly, but doesn't make PyMC3 a requirement
---> 89 return from_pymc3(trace=kwargs.pop(group), **kwargs)
90 elif obj.__class__.__name__ == "EnsembleSampler": # ugly, but doesn't make emcee a requirement
91 return from_emcee(sampler=kwargs.pop(group), **kwargs)
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/arviz-0.6.1-py3.8.egg/arviz/data/io_pymc3.py in from_pymc3(trace, prior, posterior_predictive, coords, dims, model)
350 ):
351 """Convert pymc3 data into an InferenceData object."""
--> 352 return PyMC3Converter(
353 trace=trace,
354 prior=prior,
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/arviz-0.6.1-py3.8.egg/arviz/data/io_pymc3.py in to_inference_data(self)
342 id_dict["predictions_constant_data"] = self.constant_data_to_xarray()
343 else:
--> 344 id_dict["constant_data"] = self.constant_data_to_xarray()
345 return InferenceData(**id_dict)
346
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/arviz-0.6.1-py3.8.egg/arviz/data/base.py in wrapped(cls, *args, **kwargs)
34 if all([getattr(cls, prop_i) is None for prop_i in prop]):
35 return None
---> 36 return func(cls, *args, **kwargs)
37
38 return wrapped
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/arviz-0.6.1-py3.8.egg/arviz/data/base.py in wrapped(cls, *args, **kwargs)
34 if all([getattr(cls, prop_i) is None for prop_i in prop]):
35 return None
---> 36 return func(cls, *args, **kwargs)
37
38 return wrapped
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/arviz-0.6.1-py3.8.egg/arviz/data/io_pymc3.py in constant_data_to_xarray(self)
309 # this might be a Deterministic, and must be evaluated
310 elif hasattr(self.model[name], "eval"):
--> 311 vals = self.model[name].eval()
312 vals = np.atleast_1d(vals)
313 val_dims = dims.get(name)
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/Theano-1.0.4-py3.8.egg/theano/gof/graph.py in eval(self, inputs_to_values)
520 inputs = tuple(sorted(inputs_to_values.keys(), key=id))
521 if inputs not in self._fn_cache:
--> 522 self._fn_cache[inputs] = theano.function(inputs, self)
523 args = [inputs_to_values[param] for param in inputs]
524
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/Theano-1.0.4-py3.8.egg/theano/compile/function.py in function(inputs, outputs, mode, updates, givens, no_default_updates, accept_inplace, name, rebuild_strict, allow_input_downcast, profile, on_unused_input)
304 # note: pfunc will also call orig_function -- orig_function is
305 # a choke point that all compilation must pass through
--> 306 fn = pfunc(params=inputs,
307 outputs=outputs,
308 mode=mode,
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/Theano-1.0.4-py3.8.egg/theano/compile/pfunc.py in pfunc(params, outputs, mode, updates, givens, no_default_updates, accept_inplace, name, rebuild_strict, allow_input_downcast, profile, on_unused_input, output_keys)
481 inputs.append(si)
482
--> 483 return orig_function(inputs, cloned_outputs, mode,
484 accept_inplace=accept_inplace, name=name,
485 profile=profile, on_unused_input=on_unused_input,
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/Theano-1.0.4-py3.8.egg/theano/compile/function_module.py in orig_function(inputs, outputs, mode, accept_inplace, name, profile, on_unused_input, output_keys)
1830 try:
1831 Maker = getattr(mode, 'function_maker', FunctionMaker)
-> 1832 m = Maker(inputs,
1833 outputs,
1834 mode,
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/Theano-1.0.4-py3.8.egg/theano/compile/function_module.py in __init__(self, inputs, outputs, mode, accept_inplace, function_builder, profile, on_unused_input, fgraph, output_keys, name)
1484 # make the fgraph (copies the graph, creates NEW INPUT AND
1485 # OUTPUT VARIABLES)
-> 1486 fgraph, additional_outputs = std_fgraph(inputs, outputs,
1487 accept_inplace)
1488 fgraph.profile = profile
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/Theano-1.0.4-py3.8.egg/theano/compile/function_module.py in std_fgraph(input_specs, output_specs, accept_inplace)
178 orig_outputs = [spec.variable for spec in output_specs] + updates
179
--> 180 fgraph = gof.fg.FunctionGraph(orig_inputs, orig_outputs,
181 update_mapping=update_mapping)
182
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/Theano-1.0.4-py3.8.egg/theano/gof/fg.py in __init__(self, inputs, outputs, features, clone, update_mapping)
173
174 for output in outputs:
--> 175 self.__import_r__(output, reason="init")
176 for i, output in enumerate(outputs):
177 output.clients.append(('output', i))
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/Theano-1.0.4-py3.8.egg/theano/gof/fg.py in __import_r__(self, variable, reason)
344 # Imports the owners of the variables
345 if variable.owner and variable.owner not in self.apply_nodes:
--> 346 self.__import__(variable.owner, reason=reason)
347 elif (variable.owner is None and
348 not isinstance(variable, graph.Constant) and
~/anaconda3/envs/pymc3/lib/python3.8/site-packages/Theano-1.0.4-py3.8.egg/theano/gof/fg.py in __import__(self, apply_node, check, reason)
389 "for more information on this error."
390 % (node.inputs.index(r), str(node)))
--> 391 raise MissingInputError(error_msg, variable=r)
392
393 for node in new_nodes:
MissingInputError: Input 0 of the graph (indices start from 0), used to compute InplaceDimShuffle{x}(x), was not provided and not given a value. Use the Theano flag exception_verbosity='high', for more information on this error.
```
|
pretix__pretix-2537 | [
{
"content": "#\n# This file is part of pretix (Community Edition).\n#\n# Copyright (C) 2014-2020 Raphael Michel and contributors\n# Copyright (C) 2020-2021 rami.io GmbH and contributors\n#\n# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General\n# Public License as published by the Free Software Foundation in version 3 of the License.\n#\n# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are\n# applicable granting you additional permissions and placing additional restrictions on your usage of this software.\n# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive\n# this file, see <https://pretix.eu/about/en/license>.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied\n# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more\n# details.\n#\n# You should have received a copy of the GNU Affero General Public License along with this program. If not, see\n# <https://www.gnu.org/licenses/>.\n#\n\n# This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of\n# the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>.\n#\n# This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A\n# full history of changes and contributors is available at <https://github.com/pretix/pretix>.\n#\n# This file contains Apache-licensed contributions copyrighted by: Jan Felix Wiebe, Mohit Jindal\n#\n# Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations under the License.\nimport calendar\nimport hashlib\nimport math\nfrom collections import defaultdict\nfrom datetime import date, datetime, time, timedelta\nfrom functools import reduce\nfrom urllib.parse import quote, urlencode\n\nimport dateutil\nimport isoweek\nimport pytz\nfrom django.conf import settings\nfrom django.core.cache import caches\nfrom django.db.models import Exists, Max, Min, OuterRef, Prefetch, Q\nfrom django.db.models.functions import Coalesce, Greatest\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.formats import date_format, get_format\nfrom django.utils.timezone import get_current_timezone, now\nfrom django.views import View\nfrom django.views.decorators.cache import cache_page\nfrom django.views.generic import ListView, TemplateView\nfrom pytz import UTC\n\nfrom pretix.base.i18n import language\nfrom pretix.base.models import (\n Event, EventMetaValue, Organizer, Quota, SubEvent, SubEventMetaValue,\n)\nfrom pretix.base.services.quotas import QuotaAvailability\nfrom pretix.helpers.compat import date_fromisocalendar\nfrom pretix.helpers.daterange import daterange\nfrom pretix.helpers.formats.en.formats import (\n SHORT_MONTH_DAY_FORMAT, WEEK_FORMAT,\n)\nfrom pretix.multidomain.urlreverse import eventreverse\nfrom pretix.presale.ical import get_public_ical\nfrom pretix.presale.views import OrganizerViewMixin\n\n\ndef filter_qs_by_attr(qs, request):\n \"\"\"\n We'll allow to filter the event list using attributes defined in the event meta data\n models in the format ?attr[meta_name]=meta_value\n \"\"\"\n attrs = {}\n for i, item in enumerate(request.GET.items()):\n k, v = item\n if k.startswith(\"attr[\") and k.endswith(\"]\"):\n attrs[k[5:-1]] = v\n\n skey = 'filter_qs_by_attr_{}_{}'.format(request.organizer.pk, request.event.pk if hasattr(request, 'event') else '')\n if request.GET.get('attr_persist'):\n request.session[skey] = attrs\n elif skey in request.session:\n attrs = request.session[skey]\n\n props = {\n p.name: p for p in request.organizer.meta_properties.filter(\n name__in=attrs.keys()\n )\n }\n\n for i, item in enumerate(attrs.items()):\n attr, v = item\n emv_with_value = EventMetaValue.objects.filter(\n event=OuterRef('event' if qs.model == SubEvent else 'pk'),\n property__name=attr,\n value=v\n )\n emv_with_any_value = EventMetaValue.objects.filter(\n event=OuterRef('event' if qs.model == SubEvent else 'pk'),\n property__name=attr,\n )\n if qs.model == SubEvent:\n semv_with_value = SubEventMetaValue.objects.filter(\n subevent=OuterRef('pk'),\n property__name=attr,\n value=v\n )\n semv_with_any_value = SubEventMetaValue.objects.filter(\n subevent=OuterRef('pk'),\n property__name=attr,\n )\n\n prop = props.get(attr)\n if not prop:\n continue\n annotations = {'attr_{}'.format(i): Exists(emv_with_value)}\n if qs.model == SubEvent:\n annotations['attr_{}_sub'.format(i)] = Exists(semv_with_value)\n annotations['attr_{}_sub_any'.format(i)] = Exists(semv_with_any_value)\n filters = Q(**{'attr_{}_sub'.format(i): True})\n filters |= Q(Q(**{'attr_{}_sub_any'.format(i): False}) & Q(**{'attr_{}'.format(i): True}))\n if prop.default == v:\n annotations['attr_{}_any'.format(i)] = Exists(emv_with_any_value)\n filters |= Q(Q(**{'attr_{}_sub_any'.format(i): False}) & Q(**{'attr_{}_any'.format(i): False}))\n else:\n filters = Q(**{'attr_{}'.format(i): True})\n if prop.default == 
v:\n annotations['attr_{}_any'.format(i)] = Exists(emv_with_any_value)\n filters |= Q(**{'attr_{}_any'.format(i): False})\n\n qs = qs.annotate(**annotations).filter(filters)\n return qs\n\n\nclass EventListMixin:\n\n def _get_event_queryset(self):\n query = Q(is_public=True) & Q(live=True)\n qs = self.request.organizer.events.using(settings.DATABASE_REPLICA).filter(query)\n qs = qs.filter(sales_channels__contains=self.request.sales_channel.identifier)\n qs = qs.annotate(\n min_from=Min('subevents__date_from'),\n min_to=Min('subevents__date_to'),\n max_from=Max('subevents__date_from'),\n max_to=Max('subevents__date_to'),\n max_fromto=Greatest(Max('subevents__date_to'), Max('subevents__date_from')),\n )\n if \"old\" in self.request.GET:\n qs = qs.filter(\n Q(Q(has_subevents=False) & Q(\n Q(date_to__lt=now()) | Q(Q(date_to__isnull=True) & Q(date_from__lt=now()))\n )) | Q(Q(has_subevents=True) & Q(\n Q(min_to__lt=now()) | Q(min_from__lt=now()))\n )\n ).annotate(\n order_to=Coalesce('max_fromto', 'max_to', 'max_from', 'date_to', 'date_from'),\n ).order_by('-order_to')\n else:\n qs = qs.filter(\n Q(Q(has_subevents=False) & Q(\n Q(date_to__gte=now()) | Q(Q(date_to__isnull=True) & Q(date_from__gte=now()))\n )) | Q(Q(has_subevents=True) & Q(\n Q(max_to__gte=now()) | Q(max_from__gte=now()))\n )\n ).annotate(\n order_from=Coalesce('min_from', 'date_from'),\n ).order_by('order_from')\n qs = Event.annotated(filter_qs_by_attr(qs, self.request))\n return qs\n\n def _set_month_to_next_subevent(self):\n tz = pytz.timezone(self.request.event.settings.timezone)\n next_sev = self.request.event.subevents.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n active=True,\n is_public=True,\n ).select_related('event').order_by('date_from').first()\n\n if next_sev:\n datetime_from = next_sev.date_from\n self.year = datetime_from.astimezone(tz).year\n self.month = datetime_from.astimezone(tz).month\n else:\n self.year = now().year\n self.month = now().month\n\n def _set_month_to_next_event(self):\n next_ev = filter_qs_by_attr(Event.objects.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n organizer=self.request.organizer,\n live=True,\n is_public=True,\n has_subevents=False\n ), self.request).order_by('date_from').first()\n next_sev = filter_qs_by_attr(SubEvent.objects.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n active=True,\n is_public=True,\n ), self.request).select_related('event').order_by('date_from').first()\n\n datetime_from = None\n if (next_ev and next_sev and next_sev.date_from < next_ev.date_from) or (next_sev and not next_ev):\n datetime_from = next_sev.date_from\n next_ev = next_sev.event\n elif next_ev:\n datetime_from = next_ev.date_from\n\n if datetime_from:\n tz = pytz.timezone(next_ev.settings.timezone)\n self.year = datetime_from.astimezone(tz).year\n self.month = datetime_from.astimezone(tz).month\n else:\n self.year = now().year\n self.month = now().month\n\n def _set_month_year(self):\n if 'date' in self.request.GET:\n try:\n date = dateutil.parser.parse(self.request.GET.get('date')).date()\n except ValueError:\n date = now().date()\n self.year = date.year\n self.month = date.month\n else:\n if hasattr(self.request, 'event'):\n self._set_month_to_next_subevent()\n else:\n self._set_month_to_next_event()\n\n 
def _set_week_to_next_subevent(self):\n tz = pytz.timezone(self.request.event.settings.timezone)\n next_sev = self.request.event.subevents.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n active=True,\n is_public=True,\n ).select_related('event').order_by('date_from').first()\n\n if next_sev:\n datetime_from = next_sev.date_from\n self.year = datetime_from.astimezone(tz).isocalendar()[0]\n self.week = datetime_from.astimezone(tz).isocalendar()[1]\n else:\n self.year = now().isocalendar()[0]\n self.week = now().isocalendar()[1]\n\n def _set_week_to_next_event(self):\n next_ev = filter_qs_by_attr(Event.objects.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n organizer=self.request.organizer,\n live=True,\n is_public=True,\n has_subevents=False\n ), self.request).order_by('date_from').first()\n next_sev = filter_qs_by_attr(SubEvent.objects.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n active=True,\n is_public=True,\n ), self.request).select_related('event').order_by('date_from').first()\n\n datetime_from = None\n if (next_ev and next_sev and next_sev.date_from < next_ev.date_from) or (next_sev and not next_ev):\n datetime_from = next_sev.date_from\n next_ev = next_sev.event\n elif next_ev:\n datetime_from = next_ev.date_from\n\n if datetime_from:\n tz = pytz.timezone(next_ev.settings.timezone)\n self.year = datetime_from.astimezone(tz).isocalendar()[0]\n self.week = datetime_from.astimezone(tz).isocalendar()[1]\n else:\n self.year = now().isocalendar()[0]\n self.week = now().isocalendar()[1]\n\n def _set_week_year(self):\n if 'date' in self.request.GET:\n try:\n iso = dateutil.parser.isoparse(self.request.GET.get('date')).isocalendar()\n except ValueError:\n iso = now().isocalendar()\n self.year = iso[0]\n self.week = iso[1]\n else:\n if hasattr(self.request, 'event'):\n self._set_week_to_next_subevent()\n else:\n self._set_week_to_next_event()\n\n\nclass OrganizerIndex(OrganizerViewMixin, EventListMixin, ListView):\n model = Event\n context_object_name = 'events'\n template_name = 'pretixpresale/organizers/index.html'\n paginate_by = 30\n\n def dispatch(self, request, *args, **kwargs):\n # In stock pretix, nothing on this page is session-dependent except for the language and the customer login part,\n # so we can cache pretty aggressively if the user is anonymous. Note that we deliberately implement the caching\n # on the view layer, *after* all middlewares have been ran, so we have access to the computed locale, as well\n # as the login status etc.\n cache_allowed = (\n settings.CACHE_LARGE_VALUES_ALLOWED and\n not getattr(request, 'customer', None) and\n not request.user.is_authenticated\n )\n\n if not cache_allowed:\n return super().dispatch(request, *args, **kwargs)\n\n cache_key_parts = [\n request.method,\n request.host,\n str(request.organizer.pk),\n request.get_full_path(),\n request.LANGUAGE_CODE,\n self.request.sales_channel.identifier,\n ]\n for c, v in request.COOKIES.items():\n # If the cookie is not one we know, it might be set by a plugin and we need to include it in the\n # cache key to be safe. A known example includes plugins that e.g. 
store cookie banner state.\n if c not in (settings.SESSION_COOKIE_NAME, settings.LANGUAGE_COOKIE_NAME, settings.CSRF_COOKIE_NAME) and not c.startswith('__'):\n cache_key_parts.append(f'{c}={v}')\n for c, v in request.session.items():\n # If the session key is not one we know, it might be set by a plugin and we need to include it in the\n # cache key to be safe. A known example would be the pretix-campaigns plugin setting the campaign ID.\n if (\n not c.startswith('_auth') and\n not c.startswith('pretix_auth_') and\n not c.startswith('customer_auth_') and\n not c.startswith('current_cart_') and\n not c.startswith('cart_') and\n not c.startswith('payment_') and\n c not in ('carts', 'payment', 'pinned_user_agent')\n ):\n cache_key_parts.append(f'{c}={repr(v)}')\n\n cache_key = f'pretix.presale.views.organizer.OrganizerIndex:{hashlib.md5(\":\".join(cache_key_parts).encode()).hexdigest()}'\n cache_timeout = 15\n cache = caches[settings.CACHE_LARGE_VALUES_ALIAS]\n\n response = cache.get(cache_key)\n if response is not None:\n return response\n\n response = super().dispatch(request, *kwargs, **kwargs)\n if response.status_code >= 400:\n return response\n\n if hasattr(response, 'render') and callable(response.render):\n def _store_to_cache(r):\n cache.set(cache_key, r, cache_timeout)\n\n response.add_post_render_callback(_store_to_cache)\n else:\n cache.set(cache_key, response, cache_timeout)\n return response\n\n def get(self, request, *args, **kwargs):\n style = request.GET.get(\"style\", request.organizer.settings.event_list_type)\n if style == \"calendar\":\n cv = CalendarView()\n cv.request = request\n return cv.get(request, *args, **kwargs)\n elif style == \"day\":\n cv = DayCalendarView()\n cv.request = request\n return cv.get(request, *args, **kwargs)\n elif style == \"week\":\n cv = WeekCalendarView()\n cv.request = request\n return cv.get(request, *args, **kwargs)\n else:\n return super().get(request, *args, **kwargs)\n\n def get_queryset(self):\n return self._get_event_queryset()\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n for event in ctx['events']:\n event.tzname = pytz.timezone(event.cache.get_or_set('timezone', lambda: event.settings.timezone))\n if event.has_subevents:\n event.daterange = daterange(\n event.min_from.astimezone(event.tzname),\n (event.max_fromto or event.max_to or event.max_from).astimezone(event.tzname)\n )\n return ctx\n\n\ndef has_before_after(eventqs, subeventqs, before, after):\n eqs = eventqs.filter(is_public=True, live=True, has_subevents=False)\n sqs = subeventqs.filter(active=True, is_public=True)\n return (\n eqs.filter(Q(date_from__lte=before)).exists() or sqs.filter(Q(date_from__lte=before)).exists(),\n eqs.filter(Q(date_to__gte=after) | Q(date_from__gte=after)).exists() or sqs.filter(Q(date_to__gte=after) | Q(date_from__gte=after)).exists()\n )\n\n\ndef add_events_for_days(request, baseqs, before, after, ebd, timezones):\n qs = baseqs.filter(is_public=True, live=True, has_subevents=False).filter(\n Q(Q(date_to__gte=before) & Q(date_from__lte=after)) |\n Q(Q(date_from__lte=after) & Q(date_to__gte=before)) |\n Q(Q(date_to__isnull=True) & Q(date_from__gte=before) & Q(date_from__lte=after))\n ).order_by(\n 'date_from'\n ).prefetch_related(\n '_settings_objects',\n Prefetch(\n 'organizer',\n queryset=Organizer.objects.prefetch_related('_settings_objects')\n )\n )\n if hasattr(request, 'organizer'):\n qs = filter_qs_by_attr(qs, request)\n for event in qs:\n timezones.add(event.settings.timezones)\n tz = 
pytz.timezone(event.settings.timezone)\n datetime_from = event.date_from.astimezone(tz)\n date_from = datetime_from.date()\n if event.settings.show_date_to and event.date_to:\n datetime_to = event.date_to.astimezone(tz)\n date_to = event.date_to.astimezone(tz).date()\n d = max(date_from, before.date())\n while d <= date_to and d <= after.date():\n first = d == date_from\n ebd[d].append({\n 'event': event,\n 'continued': not first,\n 'time': datetime_from.time().replace(tzinfo=None) if first and event.settings.show_times else None,\n 'time_end': (\n datetime_to.time().replace(tzinfo=None)\n if (date_to == date_from or (\n date_to == date_from + timedelta(days=1) and datetime_to.time() < datetime_from.time()\n )) and event.settings.show_times\n else None\n ),\n 'time_end_today': (\n datetime_to.time().replace(tzinfo=None)\n if date_to == d and event.settings.show_times\n else None\n ),\n 'url': eventreverse(event, 'presale:event.index'),\n 'timezone': event.settings.timezone,\n })\n d += timedelta(days=1)\n\n else:\n ebd[date_from].append({\n 'event': event,\n 'continued': False,\n 'time': datetime_from.time().replace(tzinfo=None) if event.settings.show_times else None,\n 'url': eventreverse(event, 'presale:event.index'),\n 'timezone': event.settings.timezone,\n })\n\n\ndef add_subevents_for_days(qs, before, after, ebd, timezones, event=None, cart_namespace=None, voucher=None):\n qs = qs.filter(active=True, is_public=True).filter(\n Q(Q(date_to__gte=before) & Q(date_from__lte=after)) |\n Q(Q(date_from__lte=after) & Q(date_to__gte=before)) |\n Q(Q(date_to__isnull=True) & Q(date_from__gte=before) & Q(date_from__lte=after))\n ).order_by(\n 'date_from'\n )\n\n quotas_to_compute = []\n for se in qs:\n if se.presale_is_running:\n quotas_to_compute += se.active_quotas\n\n qcache = {}\n if quotas_to_compute:\n qa = QuotaAvailability()\n qa.queue(*quotas_to_compute)\n qa.compute(allow_cache=True)\n qcache.update(qa.results)\n\n for se in qs:\n if qcache:\n se._quota_cache = qcache\n kwargs = {'subevent': se.pk}\n if cart_namespace:\n kwargs['cart_namespace'] = cart_namespace\n\n s = event.settings if event else se.event.settings\n\n if s.event_list_available_only:\n hide = se.presale_has_ended or (\n (not voucher or not voucher.allow_ignore_quota) and\n se.best_availability_state is not None and\n se.best_availability_state < Quota.AVAILABILITY_RESERVED\n )\n if hide:\n continue\n\n timezones.add(s.timezones)\n tz = pytz.timezone(s.timezone)\n datetime_from = se.date_from.astimezone(tz)\n date_from = datetime_from.date()\n if s.show_date_to and se.date_to:\n datetime_to = se.date_to.astimezone(tz)\n date_to = se.date_to.astimezone(tz).date()\n d = max(date_from, before.date())\n while d <= date_to and d <= after.date():\n first = d == date_from\n ebd[d].append({\n 'continued': not first,\n 'timezone': s.timezone,\n 'time': datetime_from.time().replace(tzinfo=None) if first and s.show_times else None,\n 'time_end': (\n datetime_to.time().replace(tzinfo=None)\n if (date_to == date_from or (\n date_to == date_from + timedelta(days=1) and datetime_to.time() < datetime_from.time()\n )) and s.show_times\n else None\n ),\n 'time_end_today': (\n datetime_to.time().replace(tzinfo=None)\n if date_to == d and s.show_times\n else None\n ),\n 'event': se,\n 'url': (\n eventreverse(se.event, 'presale:event.redeem',\n kwargs={k: v for k, v in kwargs.items() if k != 'subevent'}) + f'?subevent={se.pk}&voucher={quote(voucher.code)}'\n if voucher\n else eventreverse(se.event, 'presale:event.index', kwargs=kwargs)\n )\n 
})\n d += timedelta(days=1)\n\n else:\n ebd[date_from].append({\n 'event': se,\n 'continued': False,\n 'time': datetime_from.time().replace(tzinfo=None) if s.show_times else None,\n 'url': (\n eventreverse(se.event, 'presale:event.redeem',\n kwargs={k: v for k, v in kwargs.items() if k != 'subevent'}) + f'?subevent={se.pk}&voucher={quote(voucher.code)}'\n if voucher\n else eventreverse(se.event, 'presale:event.index', kwargs=kwargs)\n ),\n 'timezone': s.timezone,\n })\n\n\ndef sort_ev(e):\n return e['time'] or time(0, 0, 0), str(e['event'].name)\n\n\ndef days_for_template(ebd, week):\n day_format = get_format('WEEK_DAY_FORMAT')\n if day_format == 'WEEK_DAY_FORMAT':\n day_format = 'SHORT_DATE_FORMAT'\n return [\n {\n 'day_formatted': date_format(day, day_format),\n 'date': day,\n 'today': day == now().astimezone(get_current_timezone()).date(),\n 'events': sorted(ebd.get(day), key=sort_ev) if day in ebd else []\n }\n for day in week.days()\n ]\n\n\ndef weeks_for_template(ebd, year, month):\n calendar.setfirstweekday(0) # TODO: Configurable\n return [\n [\n {\n 'day': day,\n 'date': date(year, month, day),\n 'events': (\n sorted(ebd.get(date(year, month, day)), key=sort_ev)\n if date(year, month, day) in ebd else None\n )\n }\n if day > 0\n else None\n for day in week\n ]\n for week in calendar.monthcalendar(year, month)\n ]\n\n\nclass CalendarView(OrganizerViewMixin, EventListMixin, TemplateView):\n template_name = 'pretixpresale/organizers/calendar.html'\n\n def get(self, request, *args, **kwargs):\n # redirect old month-year-URLs to new date-URLs\n keys = (\"month\", \"year\")\n if all(k in request.GET for k in keys):\n get_params = {k: v for k, v in request.GET.items() if k not in keys}\n get_params[\"date\"] = \"%s-%s\" % (request.GET.get(\"year\"), request.GET.get(\"month\"))\n return redirect(self.request.path + \"?\" + urlencode(get_params))\n\n self._set_month_year()\n return super().get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data()\n\n try:\n _, ndays = calendar.monthrange(self.year, self.month)\n except calendar.IllegalMonthError:\n raise Http404()\n before = datetime(self.year, self.month, 1, 0, 0, 0, tzinfo=UTC) - timedelta(days=1)\n after = datetime(self.year, self.month, ndays, 0, 0, 0, tzinfo=UTC) + timedelta(days=1)\n\n ctx['date'] = date(self.year, self.month, 1)\n ctx['before'] = before\n ctx['after'] = after\n ebd = self._events_by_day(before, after)\n\n ctx['has_before'], ctx['has_after'] = has_before_after(\n self.request.organizer.events.filter(\n sales_channels__contains=self.request.sales_channel.identifier\n ),\n SubEvent.objects.filter(\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n event__sales_channels__contains=self.request.sales_channel.identifier\n ),\n before,\n after,\n )\n\n ctx['multiple_timezones'] = self._multiple_timezones\n ctx['weeks'] = weeks_for_template(ebd, self.year, self.month)\n ctx['months'] = [date(self.year, i + 1, 1) for i in range(12)]\n ctx['years'] = range(now().year - 2, now().year + 3)\n\n return ctx\n\n def _events_by_day(self, before, after):\n ebd = defaultdict(list)\n timezones = set()\n add_events_for_days(self.request, Event.annotated(self.request.organizer.events, 'web').using(\n settings.DATABASE_REPLICA\n ).filter(\n sales_channels__contains=self.request.sales_channel.identifier\n ), before, after, ebd, timezones)\n add_subevents_for_days(filter_qs_by_attr(SubEvent.annotated(SubEvent.objects.filter(\n 
event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n event__sales_channels__contains=self.request.sales_channel.identifier\n ).prefetch_related(\n Prefetch(\n 'event',\n queryset=Event.objects.prefetch_related(\n '_settings_objects',\n Prefetch(\n 'organizer',\n queryset=Organizer.objects.prefetch_related('_settings_objects')\n )\n )\n )\n )), self.request).using(settings.DATABASE_REPLICA), before, after, ebd, timezones)\n self._multiple_timezones = len(timezones) > 1\n return ebd\n\n\nclass WeekCalendarView(OrganizerViewMixin, EventListMixin, TemplateView):\n template_name = 'pretixpresale/organizers/calendar_week.html'\n\n def get(self, request, *args, **kwargs):\n # redirect old week-year-URLs to new date-URLs\n keys = (\"week\", \"year\")\n if all(k in request.GET for k in keys):\n get_params = {k: v for k, v in request.GET.items() if k not in keys}\n get_params[\"date\"] = \"%s-W%s\" % (request.GET.get(\"year\"), request.GET.get(\"week\"))\n return redirect(self.request.path + \"?\" + urlencode(get_params))\n\n self._set_week_year()\n return super().get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data()\n\n week = isoweek.Week(self.year, self.week)\n before = datetime(\n week.monday().year, week.monday().month, week.monday().day, 0, 0, 0, tzinfo=UTC\n ) - timedelta(days=1)\n after = datetime(\n week.sunday().year, week.sunday().month, week.sunday().day, 0, 0, 0, tzinfo=UTC\n ) + timedelta(days=1)\n\n ctx['date'] = week.monday()\n ctx['before'] = before\n ctx['after'] = after\n\n ebd = self._events_by_day(before, after)\n\n ctx['has_before'], ctx['has_after'] = has_before_after(\n self.request.organizer.events.filter(\n sales_channels__contains=self.request.sales_channel.identifier\n ),\n SubEvent.objects.filter(\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n event__sales_channels__contains=self.request.sales_channel.identifier\n ),\n before,\n after,\n )\n\n ctx['days'] = days_for_template(ebd, week)\n years = (self.year - 1, self.year, self.year + 1)\n weeks = []\n for year in years:\n weeks += [\n (date_fromisocalendar(year, i + 1, 1), date_fromisocalendar(year, i + 1, 7))\n for i in range(53 if date(year, 12, 31).isocalendar()[1] == 53 else 52)\n ]\n ctx['weeks'] = [[w for w in weeks if w[0].year == year] for year in years]\n ctx['week_format'] = get_format('WEEK_FORMAT')\n if ctx['week_format'] == 'WEEK_FORMAT':\n ctx['week_format'] = WEEK_FORMAT\n ctx['short_month_day_format'] = get_format('SHORT_MONTH_DAY_FORMAT')\n if ctx['short_month_day_format'] == 'SHORT_MONTH_DAY_FORMAT':\n ctx['short_month_day_format'] = SHORT_MONTH_DAY_FORMAT\n ctx['multiple_timezones'] = self._multiple_timezones\n\n return ctx\n\n def _events_by_day(self, before, after):\n ebd = defaultdict(list)\n timezones = set()\n add_events_for_days(self.request, Event.annotated(self.request.organizer.events, 'web').using(\n settings.DATABASE_REPLICA\n ).filter(\n sales_channels__contains=self.request.sales_channel.identifier\n ), before, after, ebd, timezones)\n add_subevents_for_days(filter_qs_by_attr(SubEvent.annotated(SubEvent.objects.filter(\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n event__sales_channels__contains=self.request.sales_channel.identifier\n ).prefetch_related(\n Prefetch(\n 'event',\n queryset=Event.objects.prefetch_related(\n '_settings_objects',\n Prefetch(\n 'organizer',\n 
queryset=Organizer.objects.prefetch_related('_settings_objects')\n )\n )\n )\n )), self.request).using(settings.DATABASE_REPLICA), before, after, ebd, timezones)\n self._multiple_timezones = len(timezones) > 1\n return ebd\n\n\nclass DayCalendarView(OrganizerViewMixin, EventListMixin, TemplateView):\n template_name = 'pretixpresale/organizers/calendar_day.html'\n\n def _set_date_to_next_event(self):\n next_ev = filter_qs_by_attr(Event.objects.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n organizer=self.request.organizer,\n live=True,\n is_public=True,\n date_from__gte=now(),\n ), self.request).order_by('date_from').first()\n next_sev = filter_qs_by_attr(SubEvent.objects.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n active=True,\n is_public=True,\n ), self.request).select_related('event').order_by('date_from').first()\n\n datetime_from = None\n if (next_ev and next_sev and next_sev.date_from < next_ev.date_from) or (next_sev and not next_ev):\n datetime_from = next_sev.date_from\n next_ev = next_sev.event\n elif next_ev:\n datetime_from = next_ev.date_from\n\n if datetime_from:\n self.tz = pytz.timezone(next_ev.settings.timezone)\n self.date = datetime_from.astimezone(self.tz).date()\n else:\n self.tz = self.request.organizer.timezone\n self.date = now().astimezone(self.tz).date()\n\n def _set_date(self):\n if 'date' in self.request.GET:\n self.tz = self.request.organizer.timezone\n try:\n self.date = dateutil.parser.parse(self.request.GET.get('date')).date()\n except ValueError:\n self.date = now().astimezone(self.tz).date()\n else:\n self._set_date_to_next_event()\n\n def get(self, request, *args, **kwargs):\n self._set_date()\n return super().get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data()\n\n before = datetime(\n self.date.year, self.date.month, self.date.day, 0, 0, 0, tzinfo=UTC\n ) - timedelta(days=1)\n after = datetime(\n self.date.year, self.date.month, self.date.day, 0, 0, 0, tzinfo=UTC\n ) + timedelta(days=1)\n\n ctx['date'] = self.date\n ctx['cal_tz'] = self.tz\n ctx['before'] = before\n ctx['after'] = after\n\n ctx['has_before'], ctx['has_after'] = has_before_after(\n self.request.organizer.events.filter(\n sales_channels__contains=self.request.sales_channel.identifier\n ),\n SubEvent.objects.filter(\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n event__sales_channels__contains=self.request.sales_channel.identifier\n ),\n before,\n after,\n )\n\n ebd = self._events_by_day(before, after)\n if not ebd[self.date]:\n return ctx\n\n events = ebd[self.date]\n shortest_duration = self._get_shortest_duration(events).total_seconds() // 60\n # pick the next biggest tick_duration based on shortest_duration, max. 
180 minutes\n tick_duration = next((d for d in [5, 10, 15, 30, 60, 120, 180] if d >= shortest_duration), 180)\n\n raster_size = min(self._get_raster_size(events), tick_duration)\n events, start, end = self._rasterize_events(events, tick_duration=tick_duration, raster_size=raster_size)\n calendar_duration = self._get_time_duration(start, end)\n ctx[\"calendar_duration\"] = self._format_duration(calendar_duration)\n ctx['time_ticks'] = self._get_time_ticks(start, end, tick_duration)\n ctx['start'] = datetime.combine(self.date, start)\n ctx['raster_size'] = raster_size\n # ctx['end'] = end\n # size of each grid-column is based on shortest event duration and raster_size\n # raster_size is based on start/end times, so it could happen we have a small raster but long running events\n # raster_size will always be smaller or equals tick_duration\n ctx['raster_to_shortest_ratio'] = round((8 * raster_size) / shortest_duration)\n\n ctx['events'] = events\n\n events_by_series = self._grid_for_template(events)\n ctx['collections'] = events_by_series\n ctx['no_headlines'] = not any([series for series, events in events_by_series])\n ctx['multiple_timezones'] = self._multiple_timezones\n return ctx\n\n def _get_raster_size(self, events):\n # get best raster-size for min. # of columns in grid\n # due to grid-col-calculations in CSS raster_size cannot be bigger than 60 (minutes)\n\n # all start- and end-times (minute-part) except full hour\n times = [\n e[\"time\"].minute for e in events if e[\"time\"] and e[\"time\"].minute\n ] + [\n e[\"time_end_today\"].minute for e in events if \"time_end_today\" in e and e[\"time_end_today\"] and e[\"time_end_today\"].minute\n ]\n if not times:\n # no time other than full hour, so raster can be 1 hour/60 minutes\n return 60\n gcd = reduce(math.gcd, set(times))\n return next((d for d in [5, 10, 15, 30, 60] if d >= gcd), 60)\n\n def _get_time_duration(self, start, end):\n midnight = time(0, 0)\n return datetime.combine(\n self.date if end != midnight else self.date + timedelta(days=1),\n end\n ) - datetime.combine(\n self.date,\n start\n )\n\n def _format_duration(self, duration):\n return \":\".join([\n \"%02d\" % i for i in (\n (duration.days * 24) + (duration.seconds // 3600),\n (duration.seconds // 60) % 60\n )\n ])\n\n def _floor_time(self, t, raster_size=5):\n # raster_size based on minutes, might be factored into a helper class with a timedelta as raster\n minutes = t.hour * 60 + t.minute\n if minutes % raster_size:\n minutes = (minutes // raster_size) * raster_size\n return t.replace(hour=minutes // 60, minute=minutes % 60)\n return t\n\n def _ceil_time(self, t, raster_size=5):\n # raster_size based on minutes, might be factored into a helper class with a timedelta as raster\n minutes = t.hour * 60 + t.minute\n if not minutes % raster_size:\n return t\n minutes = math.ceil(minutes / raster_size) * raster_size\n minute = minutes % 60\n hour = minutes // 60\n if hour > 23:\n hour = hour % 24\n return t.replace(minute=minute, hour=hour)\n\n def _rasterize_events(self, events, tick_duration, raster_size=5):\n rastered_events = []\n start, end = self._get_time_range(events)\n start = self._floor_time(start, raster_size=tick_duration)\n end = self._ceil_time(end, raster_size=tick_duration)\n\n midnight = time(0, 0)\n for e in events:\n t = e[\"time\"] or time(0, 0)\n e[\"offset_shift_start\"] = 0\n if e[\"continued\"]:\n e[\"time_rastered\"] = midnight\n elif t.minute % raster_size:\n e[\"time_rastered\"] = t.replace(minute=(t.minute // raster_size) * raster_size)\n 
e[\"offset_shift_start\"] = t.minute % raster_size\n else:\n e[\"time_rastered\"] = t\n\n e[\"offset_shift_end\"] = 0\n if \"time_end_today\" in e and e[\"time_end_today\"]:\n if e[\"time_end_today\"].minute % raster_size:\n minute = math.ceil(e[\"time_end_today\"].minute / raster_size) * raster_size\n hour = e[\"time_end_today\"].hour\n if minute > 59:\n minute = minute % 60\n hour = (hour + 1) % 24\n e[\"time_end_today_rastered\"] = e[\"time_end_today\"].replace(minute=minute, hour=hour)\n e[\"offset_shift_end\"] = raster_size - e[\"time_end_today\"].minute % raster_size\n else:\n e[\"time_end_today_rastered\"] = e[\"time_end_today\"]\n else:\n e[\"time_end_today\"] = e[\"time_end_today_rastered\"] = time(0, 0)\n\n e[\"duration_rastered\"] = self._format_duration(datetime.combine(\n self.date if e[\"time_end_today_rastered\"] != midnight else self.date + timedelta(days=1),\n e[\"time_end_today_rastered\"]\n ) - datetime.combine(\n self.date,\n e['time_rastered']\n ))\n\n e[\"offset_rastered\"] = datetime.combine(self.date, time(0, 0)) + self._get_time_duration(start, e[\"time_rastered\"])\n\n rastered_events.append(e)\n\n return rastered_events, start, end\n\n def _get_shortest_duration(self, events):\n midnight = time(0, 0)\n durations = [\n datetime.combine(\n self.date if e.get('time_end_today') and e['time_end_today'] != midnight else self.date + timedelta(days=1),\n e['time_end_today'] if e.get('time_end_today') else time(0, 0)\n )\n -\n datetime.combine(\n self.date,\n time(0, 0) if e['continued'] else (e['time'] or time(0, 0))\n )\n for e in events\n ]\n return min([d for d in durations])\n\n def _get_time_range(self, events):\n if any(e['continued'] for e in events) or any(e['time'] is None for e in events):\n starting_at = time(0, 0)\n else:\n starting_at = min(e['time'] for e in events)\n\n if any(e.get('time_end_today') is None for e in events):\n ending_at = time(0, 0)\n else:\n ending_at = max(e['time_end_today'] for e in events)\n\n return starting_at, ending_at\n\n def _get_time_ticks(self, start, end, tick_duration):\n ticks = []\n tick_duration = timedelta(minutes=tick_duration)\n\n # convert time to datetime for timedelta calc\n start = datetime.combine(self.date, start)\n end = datetime.combine(self.date, end)\n if end <= start:\n end = end + timedelta(days=1)\n\n tick_start = start\n offset = datetime.utcfromtimestamp(0)\n duration = datetime.utcfromtimestamp(tick_duration.total_seconds())\n while tick_start < end:\n tick = {\n \"start\": tick_start,\n \"duration\": duration,\n \"offset\": offset,\n }\n ticks.append(tick)\n tick_start += tick_duration\n offset += tick_duration\n\n return ticks\n\n def _grid_for_template(self, events):\n midnight = time(0, 0)\n rows_by_collection = defaultdict(list)\n\n # We sort the events into \"collections\": all subevents from the same\n # event series together and all non-series events into a \"None\"\n # collection. 
Then, we look if there's already an event in the\n # collection that overlaps, in which case we need to split the\n # collection into multiple rows.\n for counter, e in enumerate(events):\n collection = e['event'].event if isinstance(e['event'], SubEvent) else None\n\n placed_in_row = False\n for row in rows_by_collection[collection]:\n if any(\n (e['time_rastered'] < o['time_end_today_rastered'] or o['time_end_today_rastered'] == midnight) and\n (o['time_rastered'] < e['time_end_today_rastered'] or e['time_end_today_rastered'] == midnight)\n for o in row\n ):\n continue\n row.append(e)\n placed_in_row = True\n break\n\n if not placed_in_row:\n rows_by_collection[collection].append([e])\n\n # flatten rows to one stream of events with attribute row\n # for better keyboard-tab-order in html\n for collection in rows_by_collection:\n for i, row in enumerate(rows_by_collection[collection]):\n concurrency = i + 1\n for e in row:\n e[\"concurrency\"] = concurrency\n rows_by_collection[collection] = {\n \"concurrency\": len(rows_by_collection[collection]),\n \"events\": sorted([e for row in rows_by_collection[collection] for e in row], key=lambda d: d['time'] or time(0, 0)),\n }\n\n def sort_key(c):\n collection, row = c\n if collection is None:\n return ''\n else:\n return str(collection.name)\n return sorted(rows_by_collection.items(), key=sort_key)\n\n def _events_by_day(self, before, after):\n ebd = defaultdict(list)\n timezones = set()\n add_events_for_days(self.request, Event.annotated(self.request.organizer.events, 'web').using(\n settings.DATABASE_REPLICA\n ).filter(\n sales_channels__contains=self.request.sales_channel.identifier\n ), before, after, ebd, timezones)\n add_subevents_for_days(filter_qs_by_attr(SubEvent.annotated(SubEvent.objects.filter(\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n event__sales_channels__contains=self.request.sales_channel.identifier\n ).prefetch_related(\n Prefetch(\n 'event',\n queryset=Event.objects.prefetch_related(\n '_settings_objects',\n Prefetch(\n 'organizer',\n queryset=Organizer.objects.prefetch_related('_settings_objects')\n )\n )\n )\n )), self.request).using(settings.DATABASE_REPLICA), before, after, ebd, timezones)\n self._multiple_timezones = len(timezones) > 1\n return ebd\n\n\n@method_decorator(cache_page(300), name='dispatch')\nclass OrganizerIcalDownload(OrganizerViewMixin, View):\n def get(self, request, *args, **kwargs):\n cutoff = now() - timedelta(days=31)\n events = list(\n filter_qs_by_attr(\n self.request.organizer.events.filter(\n Q(date_from__gt=cutoff) | Q(date_to__gt=cutoff),\n is_public=True,\n live=True,\n has_subevents=False,\n sales_channels__contains=self.request.sales_channel.identifier,\n ),\n request\n ).order_by(\n 'date_from'\n ).prefetch_related(\n '_settings_objects',\n Prefetch(\n 'organizer',\n queryset=Organizer.objects.prefetch_related('_settings_objects')\n )\n )\n )\n events += list(\n filter_qs_by_attr(\n SubEvent.objects.filter(\n Q(date_from__gt=cutoff) | Q(date_to__gt=cutoff),\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n is_public=True,\n active=True,\n event__sales_channels__contains=self.request.sales_channel.identifier\n ),\n request\n ).prefetch_related(\n Prefetch(\n 'event',\n queryset=Event.objects.prefetch_related(\n '_settings_objects',\n Prefetch(\n 'organizer',\n queryset=Organizer.objects.prefetch_related('_settings_objects')\n )\n )\n )\n ).order_by(\n 'date_from'\n )\n )\n\n if 'locale' in request.GET and 
request.GET.get('locale') in dict(settings.LANGUAGES):\n with language(request.GET.get('locale'), self.request.organizer.settings.region):\n cal = get_public_ical(events)\n else:\n cal = get_public_ical(events)\n\n resp = HttpResponse(cal.serialize(), content_type='text/calendar')\n resp['Content-Disposition'] = 'attachment; filename=\"{}.ics\"'.format(\n request.organizer.slug\n )\n if request.organizer.settings.meta_noindex:\n resp['X-Robots-Tag'] = 'noindex'\n return resp\n",
"path": "src/pretix/presale/views/organizer.py"
}
] | [
{
"content": "#\n# This file is part of pretix (Community Edition).\n#\n# Copyright (C) 2014-2020 Raphael Michel and contributors\n# Copyright (C) 2020-2021 rami.io GmbH and contributors\n#\n# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General\n# Public License as published by the Free Software Foundation in version 3 of the License.\n#\n# ADDITIONAL TERMS APPLY: Pursuant to Section 7 of the GNU Affero General Public License, additional terms are\n# applicable granting you additional permissions and placing additional restrictions on your usage of this software.\n# Please refer to the pretix LICENSE file to obtain the full terms applicable to this work. If you did not receive\n# this file, see <https://pretix.eu/about/en/license>.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied\n# warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more\n# details.\n#\n# You should have received a copy of the GNU Affero General Public License along with this program. If not, see\n# <https://www.gnu.org/licenses/>.\n#\n\n# This file is based on an earlier version of pretix which was released under the Apache License 2.0. The full text of\n# the Apache License 2.0 can be obtained at <http://www.apache.org/licenses/LICENSE-2.0>.\n#\n# This file may have since been changed and any changes are released under the terms of AGPLv3 as described above. A\n# full history of changes and contributors is available at <https://github.com/pretix/pretix>.\n#\n# This file contains Apache-licensed contributions copyrighted by: Jan Felix Wiebe, Mohit Jindal\n#\n# Unless required by applicable law or agreed to in writing, software distributed under the Apache License 2.0 is\n# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations under the License.\nimport calendar\nimport hashlib\nimport math\nfrom collections import defaultdict\nfrom datetime import date, datetime, time, timedelta\nfrom functools import reduce\nfrom urllib.parse import quote, urlencode\n\nimport dateutil\nimport isoweek\nimport pytz\nfrom django.conf import settings\nfrom django.core.cache import caches\nfrom django.db.models import Exists, Max, Min, OuterRef, Prefetch, Q\nfrom django.db.models.functions import Coalesce, Greatest\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import redirect\nfrom django.utils.decorators import method_decorator\nfrom django.utils.formats import date_format, get_format\nfrom django.utils.timezone import get_current_timezone, now\nfrom django.views import View\nfrom django.views.decorators.cache import cache_page\nfrom django.views.generic import ListView, TemplateView\nfrom pytz import UTC\n\nfrom pretix.base.i18n import language\nfrom pretix.base.models import (\n Event, EventMetaValue, Organizer, Quota, SubEvent, SubEventMetaValue,\n)\nfrom pretix.base.services.quotas import QuotaAvailability\nfrom pretix.helpers.compat import date_fromisocalendar\nfrom pretix.helpers.daterange import daterange\nfrom pretix.helpers.formats.en.formats import (\n SHORT_MONTH_DAY_FORMAT, WEEK_FORMAT,\n)\nfrom pretix.multidomain.urlreverse import eventreverse\nfrom pretix.presale.ical import get_public_ical\nfrom pretix.presale.views import OrganizerViewMixin\n\n\ndef filter_qs_by_attr(qs, request):\n \"\"\"\n We'll allow to filter the event list using attributes defined in the event meta data\n models in the format ?attr[meta_name]=meta_value\n \"\"\"\n attrs = {}\n for i, item in enumerate(request.GET.items()):\n k, v = item\n if k.startswith(\"attr[\") and k.endswith(\"]\"):\n attrs[k[5:-1]] = v\n\n skey = 'filter_qs_by_attr_{}_{}'.format(request.organizer.pk, request.event.pk if hasattr(request, 'event') else '')\n if request.GET.get('attr_persist'):\n request.session[skey] = attrs\n elif skey in request.session:\n attrs = request.session[skey]\n\n props = {\n p.name: p for p in request.organizer.meta_properties.filter(\n name__in=attrs.keys()\n )\n }\n\n for i, item in enumerate(attrs.items()):\n attr, v = item\n emv_with_value = EventMetaValue.objects.filter(\n event=OuterRef('event' if qs.model == SubEvent else 'pk'),\n property__name=attr,\n value=v\n )\n emv_with_any_value = EventMetaValue.objects.filter(\n event=OuterRef('event' if qs.model == SubEvent else 'pk'),\n property__name=attr,\n )\n if qs.model == SubEvent:\n semv_with_value = SubEventMetaValue.objects.filter(\n subevent=OuterRef('pk'),\n property__name=attr,\n value=v\n )\n semv_with_any_value = SubEventMetaValue.objects.filter(\n subevent=OuterRef('pk'),\n property__name=attr,\n )\n\n prop = props.get(attr)\n if not prop:\n continue\n annotations = {'attr_{}'.format(i): Exists(emv_with_value)}\n if qs.model == SubEvent:\n annotations['attr_{}_sub'.format(i)] = Exists(semv_with_value)\n annotations['attr_{}_sub_any'.format(i)] = Exists(semv_with_any_value)\n filters = Q(**{'attr_{}_sub'.format(i): True})\n filters |= Q(Q(**{'attr_{}_sub_any'.format(i): False}) & Q(**{'attr_{}'.format(i): True}))\n if prop.default == v:\n annotations['attr_{}_any'.format(i)] = Exists(emv_with_any_value)\n filters |= Q(Q(**{'attr_{}_sub_any'.format(i): False}) & Q(**{'attr_{}_any'.format(i): False}))\n else:\n filters = Q(**{'attr_{}'.format(i): True})\n if prop.default == 
v:\n annotations['attr_{}_any'.format(i)] = Exists(emv_with_any_value)\n filters |= Q(**{'attr_{}_any'.format(i): False})\n\n qs = qs.annotate(**annotations).filter(filters)\n return qs\n\n\nclass EventListMixin:\n\n def _get_event_queryset(self):\n query = Q(is_public=True) & Q(live=True)\n qs = self.request.organizer.events.using(settings.DATABASE_REPLICA).filter(query)\n qs = qs.filter(sales_channels__contains=self.request.sales_channel.identifier)\n qs = qs.annotate(\n min_from=Min('subevents__date_from'),\n min_to=Min('subevents__date_to'),\n max_from=Max('subevents__date_from'),\n max_to=Max('subevents__date_to'),\n max_fromto=Greatest(Max('subevents__date_to'), Max('subevents__date_from')),\n )\n if \"old\" in self.request.GET:\n qs = qs.filter(\n Q(Q(has_subevents=False) & Q(\n Q(date_to__lt=now()) | Q(Q(date_to__isnull=True) & Q(date_from__lt=now()))\n )) | Q(Q(has_subevents=True) & Q(\n Q(min_to__lt=now()) | Q(min_from__lt=now()))\n )\n ).annotate(\n order_to=Coalesce('max_fromto', 'max_to', 'max_from', 'date_to', 'date_from'),\n ).order_by('-order_to')\n else:\n qs = qs.filter(\n Q(Q(has_subevents=False) & Q(\n Q(date_to__gte=now()) | Q(Q(date_to__isnull=True) & Q(date_from__gte=now()))\n )) | Q(Q(has_subevents=True) & Q(\n Q(max_to__gte=now()) | Q(max_from__gte=now()))\n )\n ).annotate(\n order_from=Coalesce('min_from', 'date_from'),\n ).order_by('order_from')\n qs = Event.annotated(filter_qs_by_attr(qs, self.request))\n return qs\n\n def _set_month_to_next_subevent(self):\n tz = pytz.timezone(self.request.event.settings.timezone)\n next_sev = self.request.event.subevents.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n active=True,\n is_public=True,\n ).select_related('event').order_by('date_from').first()\n\n if next_sev:\n datetime_from = next_sev.date_from\n self.year = datetime_from.astimezone(tz).year\n self.month = datetime_from.astimezone(tz).month\n else:\n self.year = now().year\n self.month = now().month\n\n def _set_month_to_next_event(self):\n next_ev = filter_qs_by_attr(Event.objects.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n organizer=self.request.organizer,\n live=True,\n is_public=True,\n has_subevents=False\n ), self.request).order_by('date_from').first()\n next_sev = filter_qs_by_attr(SubEvent.objects.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n active=True,\n is_public=True,\n ), self.request).select_related('event').order_by('date_from').first()\n\n datetime_from = None\n if (next_ev and next_sev and next_sev.date_from < next_ev.date_from) or (next_sev and not next_ev):\n datetime_from = next_sev.date_from\n next_ev = next_sev.event\n elif next_ev:\n datetime_from = next_ev.date_from\n\n if datetime_from:\n tz = pytz.timezone(next_ev.settings.timezone)\n self.year = datetime_from.astimezone(tz).year\n self.month = datetime_from.astimezone(tz).month\n else:\n self.year = now().year\n self.month = now().month\n\n def _set_month_year(self):\n if 'date' in self.request.GET:\n try:\n date = dateutil.parser.isoparse(self.request.GET.get('date')).date()\n except ValueError:\n date = now().date()\n self.year = date.year\n self.month = date.month\n else:\n if hasattr(self.request, 'event'):\n self._set_month_to_next_subevent()\n else:\n 
self._set_month_to_next_event()\n\n def _set_week_to_next_subevent(self):\n tz = pytz.timezone(self.request.event.settings.timezone)\n next_sev = self.request.event.subevents.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n active=True,\n is_public=True,\n ).select_related('event').order_by('date_from').first()\n\n if next_sev:\n datetime_from = next_sev.date_from\n self.year = datetime_from.astimezone(tz).isocalendar()[0]\n self.week = datetime_from.astimezone(tz).isocalendar()[1]\n else:\n self.year = now().isocalendar()[0]\n self.week = now().isocalendar()[1]\n\n def _set_week_to_next_event(self):\n next_ev = filter_qs_by_attr(Event.objects.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n organizer=self.request.organizer,\n live=True,\n is_public=True,\n has_subevents=False\n ), self.request).order_by('date_from').first()\n next_sev = filter_qs_by_attr(SubEvent.objects.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n active=True,\n is_public=True,\n ), self.request).select_related('event').order_by('date_from').first()\n\n datetime_from = None\n if (next_ev and next_sev and next_sev.date_from < next_ev.date_from) or (next_sev and not next_ev):\n datetime_from = next_sev.date_from\n next_ev = next_sev.event\n elif next_ev:\n datetime_from = next_ev.date_from\n\n if datetime_from:\n tz = pytz.timezone(next_ev.settings.timezone)\n self.year = datetime_from.astimezone(tz).isocalendar()[0]\n self.week = datetime_from.astimezone(tz).isocalendar()[1]\n else:\n self.year = now().isocalendar()[0]\n self.week = now().isocalendar()[1]\n\n def _set_week_year(self):\n if 'date' in self.request.GET:\n try:\n iso = dateutil.parser.isoparse(self.request.GET.get('date')).isocalendar()\n except ValueError:\n iso = now().isocalendar()\n self.year = iso[0]\n self.week = iso[1]\n else:\n if hasattr(self.request, 'event'):\n self._set_week_to_next_subevent()\n else:\n self._set_week_to_next_event()\n\n\nclass OrganizerIndex(OrganizerViewMixin, EventListMixin, ListView):\n model = Event\n context_object_name = 'events'\n template_name = 'pretixpresale/organizers/index.html'\n paginate_by = 30\n\n def dispatch(self, request, *args, **kwargs):\n # In stock pretix, nothing on this page is session-dependent except for the language and the customer login part,\n # so we can cache pretty aggressively if the user is anonymous. Note that we deliberately implement the caching\n # on the view layer, *after* all middlewares have been ran, so we have access to the computed locale, as well\n # as the login status etc.\n cache_allowed = (\n settings.CACHE_LARGE_VALUES_ALLOWED and\n not getattr(request, 'customer', None) and\n not request.user.is_authenticated\n )\n\n if not cache_allowed:\n return super().dispatch(request, *args, **kwargs)\n\n cache_key_parts = [\n request.method,\n request.host,\n str(request.organizer.pk),\n request.get_full_path(),\n request.LANGUAGE_CODE,\n self.request.sales_channel.identifier,\n ]\n for c, v in request.COOKIES.items():\n # If the cookie is not one we know, it might be set by a plugin and we need to include it in the\n # cache key to be safe. A known example includes plugins that e.g. 
store cookie banner state.\n if c not in (settings.SESSION_COOKIE_NAME, settings.LANGUAGE_COOKIE_NAME, settings.CSRF_COOKIE_NAME) and not c.startswith('__'):\n cache_key_parts.append(f'{c}={v}')\n for c, v in request.session.items():\n # If the session key is not one we know, it might be set by a plugin and we need to include it in the\n # cache key to be safe. A known example would be the pretix-campaigns plugin setting the campaign ID.\n if (\n not c.startswith('_auth') and\n not c.startswith('pretix_auth_') and\n not c.startswith('customer_auth_') and\n not c.startswith('current_cart_') and\n not c.startswith('cart_') and\n not c.startswith('payment_') and\n c not in ('carts', 'payment', 'pinned_user_agent')\n ):\n cache_key_parts.append(f'{c}={repr(v)}')\n\n cache_key = f'pretix.presale.views.organizer.OrganizerIndex:{hashlib.md5(\":\".join(cache_key_parts).encode()).hexdigest()}'\n cache_timeout = 15\n cache = caches[settings.CACHE_LARGE_VALUES_ALIAS]\n\n response = cache.get(cache_key)\n if response is not None:\n return response\n\n response = super().dispatch(request, *kwargs, **kwargs)\n if response.status_code >= 400:\n return response\n\n if hasattr(response, 'render') and callable(response.render):\n def _store_to_cache(r):\n cache.set(cache_key, r, cache_timeout)\n\n response.add_post_render_callback(_store_to_cache)\n else:\n cache.set(cache_key, response, cache_timeout)\n return response\n\n def get(self, request, *args, **kwargs):\n style = request.GET.get(\"style\", request.organizer.settings.event_list_type)\n if style == \"calendar\":\n cv = CalendarView()\n cv.request = request\n return cv.get(request, *args, **kwargs)\n elif style == \"day\":\n cv = DayCalendarView()\n cv.request = request\n return cv.get(request, *args, **kwargs)\n elif style == \"week\":\n cv = WeekCalendarView()\n cv.request = request\n return cv.get(request, *args, **kwargs)\n else:\n return super().get(request, *args, **kwargs)\n\n def get_queryset(self):\n return self._get_event_queryset()\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data(**kwargs)\n for event in ctx['events']:\n event.tzname = pytz.timezone(event.cache.get_or_set('timezone', lambda: event.settings.timezone))\n if event.has_subevents:\n event.daterange = daterange(\n event.min_from.astimezone(event.tzname),\n (event.max_fromto or event.max_to or event.max_from).astimezone(event.tzname)\n )\n return ctx\n\n\ndef has_before_after(eventqs, subeventqs, before, after):\n eqs = eventqs.filter(is_public=True, live=True, has_subevents=False)\n sqs = subeventqs.filter(active=True, is_public=True)\n return (\n eqs.filter(Q(date_from__lte=before)).exists() or sqs.filter(Q(date_from__lte=before)).exists(),\n eqs.filter(Q(date_to__gte=after) | Q(date_from__gte=after)).exists() or sqs.filter(Q(date_to__gte=after) | Q(date_from__gte=after)).exists()\n )\n\n\ndef add_events_for_days(request, baseqs, before, after, ebd, timezones):\n qs = baseqs.filter(is_public=True, live=True, has_subevents=False).filter(\n Q(Q(date_to__gte=before) & Q(date_from__lte=after)) |\n Q(Q(date_from__lte=after) & Q(date_to__gte=before)) |\n Q(Q(date_to__isnull=True) & Q(date_from__gte=before) & Q(date_from__lte=after))\n ).order_by(\n 'date_from'\n ).prefetch_related(\n '_settings_objects',\n Prefetch(\n 'organizer',\n queryset=Organizer.objects.prefetch_related('_settings_objects')\n )\n )\n if hasattr(request, 'organizer'):\n qs = filter_qs_by_attr(qs, request)\n for event in qs:\n timezones.add(event.settings.timezones)\n tz = 
pytz.timezone(event.settings.timezone)\n datetime_from = event.date_from.astimezone(tz)\n date_from = datetime_from.date()\n if event.settings.show_date_to and event.date_to:\n datetime_to = event.date_to.astimezone(tz)\n date_to = event.date_to.astimezone(tz).date()\n d = max(date_from, before.date())\n while d <= date_to and d <= after.date():\n first = d == date_from\n ebd[d].append({\n 'event': event,\n 'continued': not first,\n 'time': datetime_from.time().replace(tzinfo=None) if first and event.settings.show_times else None,\n 'time_end': (\n datetime_to.time().replace(tzinfo=None)\n if (date_to == date_from or (\n date_to == date_from + timedelta(days=1) and datetime_to.time() < datetime_from.time()\n )) and event.settings.show_times\n else None\n ),\n 'time_end_today': (\n datetime_to.time().replace(tzinfo=None)\n if date_to == d and event.settings.show_times\n else None\n ),\n 'url': eventreverse(event, 'presale:event.index'),\n 'timezone': event.settings.timezone,\n })\n d += timedelta(days=1)\n\n else:\n ebd[date_from].append({\n 'event': event,\n 'continued': False,\n 'time': datetime_from.time().replace(tzinfo=None) if event.settings.show_times else None,\n 'url': eventreverse(event, 'presale:event.index'),\n 'timezone': event.settings.timezone,\n })\n\n\ndef add_subevents_for_days(qs, before, after, ebd, timezones, event=None, cart_namespace=None, voucher=None):\n qs = qs.filter(active=True, is_public=True).filter(\n Q(Q(date_to__gte=before) & Q(date_from__lte=after)) |\n Q(Q(date_from__lte=after) & Q(date_to__gte=before)) |\n Q(Q(date_to__isnull=True) & Q(date_from__gte=before) & Q(date_from__lte=after))\n ).order_by(\n 'date_from'\n )\n\n quotas_to_compute = []\n for se in qs:\n if se.presale_is_running:\n quotas_to_compute += se.active_quotas\n\n qcache = {}\n if quotas_to_compute:\n qa = QuotaAvailability()\n qa.queue(*quotas_to_compute)\n qa.compute(allow_cache=True)\n qcache.update(qa.results)\n\n for se in qs:\n if qcache:\n se._quota_cache = qcache\n kwargs = {'subevent': se.pk}\n if cart_namespace:\n kwargs['cart_namespace'] = cart_namespace\n\n s = event.settings if event else se.event.settings\n\n if s.event_list_available_only:\n hide = se.presale_has_ended or (\n (not voucher or not voucher.allow_ignore_quota) and\n se.best_availability_state is not None and\n se.best_availability_state < Quota.AVAILABILITY_RESERVED\n )\n if hide:\n continue\n\n timezones.add(s.timezones)\n tz = pytz.timezone(s.timezone)\n datetime_from = se.date_from.astimezone(tz)\n date_from = datetime_from.date()\n if s.show_date_to and se.date_to:\n datetime_to = se.date_to.astimezone(tz)\n date_to = se.date_to.astimezone(tz).date()\n d = max(date_from, before.date())\n while d <= date_to and d <= after.date():\n first = d == date_from\n ebd[d].append({\n 'continued': not first,\n 'timezone': s.timezone,\n 'time': datetime_from.time().replace(tzinfo=None) if first and s.show_times else None,\n 'time_end': (\n datetime_to.time().replace(tzinfo=None)\n if (date_to == date_from or (\n date_to == date_from + timedelta(days=1) and datetime_to.time() < datetime_from.time()\n )) and s.show_times\n else None\n ),\n 'time_end_today': (\n datetime_to.time().replace(tzinfo=None)\n if date_to == d and s.show_times\n else None\n ),\n 'event': se,\n 'url': (\n eventreverse(se.event, 'presale:event.redeem',\n kwargs={k: v for k, v in kwargs.items() if k != 'subevent'}) + f'?subevent={se.pk}&voucher={quote(voucher.code)}'\n if voucher\n else eventreverse(se.event, 'presale:event.index', kwargs=kwargs)\n )\n 
})\n d += timedelta(days=1)\n\n else:\n ebd[date_from].append({\n 'event': se,\n 'continued': False,\n 'time': datetime_from.time().replace(tzinfo=None) if s.show_times else None,\n 'url': (\n eventreverse(se.event, 'presale:event.redeem',\n kwargs={k: v for k, v in kwargs.items() if k != 'subevent'}) + f'?subevent={se.pk}&voucher={quote(voucher.code)}'\n if voucher\n else eventreverse(se.event, 'presale:event.index', kwargs=kwargs)\n ),\n 'timezone': s.timezone,\n })\n\n\ndef sort_ev(e):\n return e['time'] or time(0, 0, 0), str(e['event'].name)\n\n\ndef days_for_template(ebd, week):\n day_format = get_format('WEEK_DAY_FORMAT')\n if day_format == 'WEEK_DAY_FORMAT':\n day_format = 'SHORT_DATE_FORMAT'\n return [\n {\n 'day_formatted': date_format(day, day_format),\n 'date': day,\n 'today': day == now().astimezone(get_current_timezone()).date(),\n 'events': sorted(ebd.get(day), key=sort_ev) if day in ebd else []\n }\n for day in week.days()\n ]\n\n\ndef weeks_for_template(ebd, year, month):\n calendar.setfirstweekday(0) # TODO: Configurable\n return [\n [\n {\n 'day': day,\n 'date': date(year, month, day),\n 'events': (\n sorted(ebd.get(date(year, month, day)), key=sort_ev)\n if date(year, month, day) in ebd else None\n )\n }\n if day > 0\n else None\n for day in week\n ]\n for week in calendar.monthcalendar(year, month)\n ]\n\n\nclass CalendarView(OrganizerViewMixin, EventListMixin, TemplateView):\n template_name = 'pretixpresale/organizers/calendar.html'\n\n def get(self, request, *args, **kwargs):\n # redirect old month-year-URLs to new date-URLs\n keys = (\"month\", \"year\")\n if all(k in request.GET for k in keys):\n get_params = {k: v for k, v in request.GET.items() if k not in keys}\n get_params[\"date\"] = \"%s-%s\" % (request.GET.get(\"year\"), request.GET.get(\"month\"))\n return redirect(self.request.path + \"?\" + urlencode(get_params))\n\n self._set_month_year()\n return super().get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data()\n\n try:\n _, ndays = calendar.monthrange(self.year, self.month)\n except calendar.IllegalMonthError:\n raise Http404()\n before = datetime(self.year, self.month, 1, 0, 0, 0, tzinfo=UTC) - timedelta(days=1)\n after = datetime(self.year, self.month, ndays, 0, 0, 0, tzinfo=UTC) + timedelta(days=1)\n\n ctx['date'] = date(self.year, self.month, 1)\n ctx['before'] = before\n ctx['after'] = after\n ebd = self._events_by_day(before, after)\n\n ctx['has_before'], ctx['has_after'] = has_before_after(\n self.request.organizer.events.filter(\n sales_channels__contains=self.request.sales_channel.identifier\n ),\n SubEvent.objects.filter(\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n event__sales_channels__contains=self.request.sales_channel.identifier\n ),\n before,\n after,\n )\n\n ctx['multiple_timezones'] = self._multiple_timezones\n ctx['weeks'] = weeks_for_template(ebd, self.year, self.month)\n ctx['months'] = [date(self.year, i + 1, 1) for i in range(12)]\n ctx['years'] = range(now().year - 2, now().year + 3)\n\n return ctx\n\n def _events_by_day(self, before, after):\n ebd = defaultdict(list)\n timezones = set()\n add_events_for_days(self.request, Event.annotated(self.request.organizer.events, 'web').using(\n settings.DATABASE_REPLICA\n ).filter(\n sales_channels__contains=self.request.sales_channel.identifier\n ), before, after, ebd, timezones)\n add_subevents_for_days(filter_qs_by_attr(SubEvent.annotated(SubEvent.objects.filter(\n 
event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n event__sales_channels__contains=self.request.sales_channel.identifier\n ).prefetch_related(\n Prefetch(\n 'event',\n queryset=Event.objects.prefetch_related(\n '_settings_objects',\n Prefetch(\n 'organizer',\n queryset=Organizer.objects.prefetch_related('_settings_objects')\n )\n )\n )\n )), self.request).using(settings.DATABASE_REPLICA), before, after, ebd, timezones)\n self._multiple_timezones = len(timezones) > 1\n return ebd\n\n\nclass WeekCalendarView(OrganizerViewMixin, EventListMixin, TemplateView):\n template_name = 'pretixpresale/organizers/calendar_week.html'\n\n def get(self, request, *args, **kwargs):\n # redirect old week-year-URLs to new date-URLs\n keys = (\"week\", \"year\")\n if all(k in request.GET for k in keys):\n get_params = {k: v for k, v in request.GET.items() if k not in keys}\n get_params[\"date\"] = \"%s-W%s\" % (request.GET.get(\"year\"), request.GET.get(\"week\"))\n return redirect(self.request.path + \"?\" + urlencode(get_params))\n\n self._set_week_year()\n return super().get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data()\n\n week = isoweek.Week(self.year, self.week)\n before = datetime(\n week.monday().year, week.monday().month, week.monday().day, 0, 0, 0, tzinfo=UTC\n ) - timedelta(days=1)\n after = datetime(\n week.sunday().year, week.sunday().month, week.sunday().day, 0, 0, 0, tzinfo=UTC\n ) + timedelta(days=1)\n\n ctx['date'] = week.monday()\n ctx['before'] = before\n ctx['after'] = after\n\n ebd = self._events_by_day(before, after)\n\n ctx['has_before'], ctx['has_after'] = has_before_after(\n self.request.organizer.events.filter(\n sales_channels__contains=self.request.sales_channel.identifier\n ),\n SubEvent.objects.filter(\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n event__sales_channels__contains=self.request.sales_channel.identifier\n ),\n before,\n after,\n )\n\n ctx['days'] = days_for_template(ebd, week)\n years = (self.year - 1, self.year, self.year + 1)\n weeks = []\n for year in years:\n weeks += [\n (date_fromisocalendar(year, i + 1, 1), date_fromisocalendar(year, i + 1, 7))\n for i in range(53 if date(year, 12, 31).isocalendar()[1] == 53 else 52)\n ]\n ctx['weeks'] = [[w for w in weeks if w[0].year == year] for year in years]\n ctx['week_format'] = get_format('WEEK_FORMAT')\n if ctx['week_format'] == 'WEEK_FORMAT':\n ctx['week_format'] = WEEK_FORMAT\n ctx['short_month_day_format'] = get_format('SHORT_MONTH_DAY_FORMAT')\n if ctx['short_month_day_format'] == 'SHORT_MONTH_DAY_FORMAT':\n ctx['short_month_day_format'] = SHORT_MONTH_DAY_FORMAT\n ctx['multiple_timezones'] = self._multiple_timezones\n\n return ctx\n\n def _events_by_day(self, before, after):\n ebd = defaultdict(list)\n timezones = set()\n add_events_for_days(self.request, Event.annotated(self.request.organizer.events, 'web').using(\n settings.DATABASE_REPLICA\n ).filter(\n sales_channels__contains=self.request.sales_channel.identifier\n ), before, after, ebd, timezones)\n add_subevents_for_days(filter_qs_by_attr(SubEvent.annotated(SubEvent.objects.filter(\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n event__sales_channels__contains=self.request.sales_channel.identifier\n ).prefetch_related(\n Prefetch(\n 'event',\n queryset=Event.objects.prefetch_related(\n '_settings_objects',\n Prefetch(\n 'organizer',\n 
queryset=Organizer.objects.prefetch_related('_settings_objects')\n )\n )\n )\n )), self.request).using(settings.DATABASE_REPLICA), before, after, ebd, timezones)\n self._multiple_timezones = len(timezones) > 1\n return ebd\n\n\nclass DayCalendarView(OrganizerViewMixin, EventListMixin, TemplateView):\n template_name = 'pretixpresale/organizers/calendar_day.html'\n\n def _set_date_to_next_event(self):\n next_ev = filter_qs_by_attr(Event.objects.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n organizer=self.request.organizer,\n live=True,\n is_public=True,\n date_from__gte=now(),\n ), self.request).order_by('date_from').first()\n next_sev = filter_qs_by_attr(SubEvent.objects.using(settings.DATABASE_REPLICA).filter(\n Q(date_from__gte=now()) | Q(date_to__isnull=False, date_to__gte=now()),\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n active=True,\n is_public=True,\n ), self.request).select_related('event').order_by('date_from').first()\n\n datetime_from = None\n if (next_ev and next_sev and next_sev.date_from < next_ev.date_from) or (next_sev and not next_ev):\n datetime_from = next_sev.date_from\n next_ev = next_sev.event\n elif next_ev:\n datetime_from = next_ev.date_from\n\n if datetime_from:\n self.tz = pytz.timezone(next_ev.settings.timezone)\n self.date = datetime_from.astimezone(self.tz).date()\n else:\n self.tz = self.request.organizer.timezone\n self.date = now().astimezone(self.tz).date()\n\n def _set_date(self):\n if 'date' in self.request.GET:\n self.tz = self.request.organizer.timezone\n try:\n self.date = dateutil.parser.parse(self.request.GET.get('date')).date()\n except ValueError:\n self.date = now().astimezone(self.tz).date()\n else:\n self._set_date_to_next_event()\n\n def get(self, request, *args, **kwargs):\n self._set_date()\n return super().get(request, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n ctx = super().get_context_data()\n\n before = datetime(\n self.date.year, self.date.month, self.date.day, 0, 0, 0, tzinfo=UTC\n ) - timedelta(days=1)\n after = datetime(\n self.date.year, self.date.month, self.date.day, 0, 0, 0, tzinfo=UTC\n ) + timedelta(days=1)\n\n ctx['date'] = self.date\n ctx['cal_tz'] = self.tz\n ctx['before'] = before\n ctx['after'] = after\n\n ctx['has_before'], ctx['has_after'] = has_before_after(\n self.request.organizer.events.filter(\n sales_channels__contains=self.request.sales_channel.identifier\n ),\n SubEvent.objects.filter(\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n event__sales_channels__contains=self.request.sales_channel.identifier\n ),\n before,\n after,\n )\n\n ebd = self._events_by_day(before, after)\n if not ebd[self.date]:\n return ctx\n\n events = ebd[self.date]\n shortest_duration = self._get_shortest_duration(events).total_seconds() // 60\n # pick the next biggest tick_duration based on shortest_duration, max. 
180 minutes\n tick_duration = next((d for d in [5, 10, 15, 30, 60, 120, 180] if d >= shortest_duration), 180)\n\n raster_size = min(self._get_raster_size(events), tick_duration)\n events, start, end = self._rasterize_events(events, tick_duration=tick_duration, raster_size=raster_size)\n calendar_duration = self._get_time_duration(start, end)\n ctx[\"calendar_duration\"] = self._format_duration(calendar_duration)\n ctx['time_ticks'] = self._get_time_ticks(start, end, tick_duration)\n ctx['start'] = datetime.combine(self.date, start)\n ctx['raster_size'] = raster_size\n # ctx['end'] = end\n # size of each grid-column is based on shortest event duration and raster_size\n # raster_size is based on start/end times, so it could happen we have a small raster but long running events\n # raster_size will always be smaller or equals tick_duration\n ctx['raster_to_shortest_ratio'] = round((8 * raster_size) / shortest_duration)\n\n ctx['events'] = events\n\n events_by_series = self._grid_for_template(events)\n ctx['collections'] = events_by_series\n ctx['no_headlines'] = not any([series for series, events in events_by_series])\n ctx['multiple_timezones'] = self._multiple_timezones\n return ctx\n\n def _get_raster_size(self, events):\n # get best raster-size for min. # of columns in grid\n # due to grid-col-calculations in CSS raster_size cannot be bigger than 60 (minutes)\n\n # all start- and end-times (minute-part) except full hour\n times = [\n e[\"time\"].minute for e in events if e[\"time\"] and e[\"time\"].minute\n ] + [\n e[\"time_end_today\"].minute for e in events if \"time_end_today\" in e and e[\"time_end_today\"] and e[\"time_end_today\"].minute\n ]\n if not times:\n # no time other than full hour, so raster can be 1 hour/60 minutes\n return 60\n gcd = reduce(math.gcd, set(times))\n return next((d for d in [5, 10, 15, 30, 60] if d >= gcd), 60)\n\n def _get_time_duration(self, start, end):\n midnight = time(0, 0)\n return datetime.combine(\n self.date if end != midnight else self.date + timedelta(days=1),\n end\n ) - datetime.combine(\n self.date,\n start\n )\n\n def _format_duration(self, duration):\n return \":\".join([\n \"%02d\" % i for i in (\n (duration.days * 24) + (duration.seconds // 3600),\n (duration.seconds // 60) % 60\n )\n ])\n\n def _floor_time(self, t, raster_size=5):\n # raster_size based on minutes, might be factored into a helper class with a timedelta as raster\n minutes = t.hour * 60 + t.minute\n if minutes % raster_size:\n minutes = (minutes // raster_size) * raster_size\n return t.replace(hour=minutes // 60, minute=minutes % 60)\n return t\n\n def _ceil_time(self, t, raster_size=5):\n # raster_size based on minutes, might be factored into a helper class with a timedelta as raster\n minutes = t.hour * 60 + t.minute\n if not minutes % raster_size:\n return t\n minutes = math.ceil(minutes / raster_size) * raster_size\n minute = minutes % 60\n hour = minutes // 60\n if hour > 23:\n hour = hour % 24\n return t.replace(minute=minute, hour=hour)\n\n def _rasterize_events(self, events, tick_duration, raster_size=5):\n rastered_events = []\n start, end = self._get_time_range(events)\n start = self._floor_time(start, raster_size=tick_duration)\n end = self._ceil_time(end, raster_size=tick_duration)\n\n midnight = time(0, 0)\n for e in events:\n t = e[\"time\"] or time(0, 0)\n e[\"offset_shift_start\"] = 0\n if e[\"continued\"]:\n e[\"time_rastered\"] = midnight\n elif t.minute % raster_size:\n e[\"time_rastered\"] = t.replace(minute=(t.minute // raster_size) * raster_size)\n 
e[\"offset_shift_start\"] = t.minute % raster_size\n else:\n e[\"time_rastered\"] = t\n\n e[\"offset_shift_end\"] = 0\n if \"time_end_today\" in e and e[\"time_end_today\"]:\n if e[\"time_end_today\"].minute % raster_size:\n minute = math.ceil(e[\"time_end_today\"].minute / raster_size) * raster_size\n hour = e[\"time_end_today\"].hour\n if minute > 59:\n minute = minute % 60\n hour = (hour + 1) % 24\n e[\"time_end_today_rastered\"] = e[\"time_end_today\"].replace(minute=minute, hour=hour)\n e[\"offset_shift_end\"] = raster_size - e[\"time_end_today\"].minute % raster_size\n else:\n e[\"time_end_today_rastered\"] = e[\"time_end_today\"]\n else:\n e[\"time_end_today\"] = e[\"time_end_today_rastered\"] = time(0, 0)\n\n e[\"duration_rastered\"] = self._format_duration(datetime.combine(\n self.date if e[\"time_end_today_rastered\"] != midnight else self.date + timedelta(days=1),\n e[\"time_end_today_rastered\"]\n ) - datetime.combine(\n self.date,\n e['time_rastered']\n ))\n\n e[\"offset_rastered\"] = datetime.combine(self.date, time(0, 0)) + self._get_time_duration(start, e[\"time_rastered\"])\n\n rastered_events.append(e)\n\n return rastered_events, start, end\n\n def _get_shortest_duration(self, events):\n midnight = time(0, 0)\n durations = [\n datetime.combine(\n self.date if e.get('time_end_today') and e['time_end_today'] != midnight else self.date + timedelta(days=1),\n e['time_end_today'] if e.get('time_end_today') else time(0, 0)\n )\n -\n datetime.combine(\n self.date,\n time(0, 0) if e['continued'] else (e['time'] or time(0, 0))\n )\n for e in events\n ]\n return min([d for d in durations])\n\n def _get_time_range(self, events):\n if any(e['continued'] for e in events) or any(e['time'] is None for e in events):\n starting_at = time(0, 0)\n else:\n starting_at = min(e['time'] for e in events)\n\n if any(e.get('time_end_today') is None for e in events):\n ending_at = time(0, 0)\n else:\n ending_at = max(e['time_end_today'] for e in events)\n\n return starting_at, ending_at\n\n def _get_time_ticks(self, start, end, tick_duration):\n ticks = []\n tick_duration = timedelta(minutes=tick_duration)\n\n # convert time to datetime for timedelta calc\n start = datetime.combine(self.date, start)\n end = datetime.combine(self.date, end)\n if end <= start:\n end = end + timedelta(days=1)\n\n tick_start = start\n offset = datetime.utcfromtimestamp(0)\n duration = datetime.utcfromtimestamp(tick_duration.total_seconds())\n while tick_start < end:\n tick = {\n \"start\": tick_start,\n \"duration\": duration,\n \"offset\": offset,\n }\n ticks.append(tick)\n tick_start += tick_duration\n offset += tick_duration\n\n return ticks\n\n def _grid_for_template(self, events):\n midnight = time(0, 0)\n rows_by_collection = defaultdict(list)\n\n # We sort the events into \"collections\": all subevents from the same\n # event series together and all non-series events into a \"None\"\n # collection. 
Then, we look if there's already an event in the\n # collection that overlaps, in which case we need to split the\n # collection into multiple rows.\n for counter, e in enumerate(events):\n collection = e['event'].event if isinstance(e['event'], SubEvent) else None\n\n placed_in_row = False\n for row in rows_by_collection[collection]:\n if any(\n (e['time_rastered'] < o['time_end_today_rastered'] or o['time_end_today_rastered'] == midnight) and\n (o['time_rastered'] < e['time_end_today_rastered'] or e['time_end_today_rastered'] == midnight)\n for o in row\n ):\n continue\n row.append(e)\n placed_in_row = True\n break\n\n if not placed_in_row:\n rows_by_collection[collection].append([e])\n\n # flatten rows to one stream of events with attribute row\n # for better keyboard-tab-order in html\n for collection in rows_by_collection:\n for i, row in enumerate(rows_by_collection[collection]):\n concurrency = i + 1\n for e in row:\n e[\"concurrency\"] = concurrency\n rows_by_collection[collection] = {\n \"concurrency\": len(rows_by_collection[collection]),\n \"events\": sorted([e for row in rows_by_collection[collection] for e in row], key=lambda d: d['time'] or time(0, 0)),\n }\n\n def sort_key(c):\n collection, row = c\n if collection is None:\n return ''\n else:\n return str(collection.name)\n return sorted(rows_by_collection.items(), key=sort_key)\n\n def _events_by_day(self, before, after):\n ebd = defaultdict(list)\n timezones = set()\n add_events_for_days(self.request, Event.annotated(self.request.organizer.events, 'web').using(\n settings.DATABASE_REPLICA\n ).filter(\n sales_channels__contains=self.request.sales_channel.identifier\n ), before, after, ebd, timezones)\n add_subevents_for_days(filter_qs_by_attr(SubEvent.annotated(SubEvent.objects.filter(\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n event__sales_channels__contains=self.request.sales_channel.identifier\n ).prefetch_related(\n Prefetch(\n 'event',\n queryset=Event.objects.prefetch_related(\n '_settings_objects',\n Prefetch(\n 'organizer',\n queryset=Organizer.objects.prefetch_related('_settings_objects')\n )\n )\n )\n )), self.request).using(settings.DATABASE_REPLICA), before, after, ebd, timezones)\n self._multiple_timezones = len(timezones) > 1\n return ebd\n\n\n@method_decorator(cache_page(300), name='dispatch')\nclass OrganizerIcalDownload(OrganizerViewMixin, View):\n def get(self, request, *args, **kwargs):\n cutoff = now() - timedelta(days=31)\n events = list(\n filter_qs_by_attr(\n self.request.organizer.events.filter(\n Q(date_from__gt=cutoff) | Q(date_to__gt=cutoff),\n is_public=True,\n live=True,\n has_subevents=False,\n sales_channels__contains=self.request.sales_channel.identifier,\n ),\n request\n ).order_by(\n 'date_from'\n ).prefetch_related(\n '_settings_objects',\n Prefetch(\n 'organizer',\n queryset=Organizer.objects.prefetch_related('_settings_objects')\n )\n )\n )\n events += list(\n filter_qs_by_attr(\n SubEvent.objects.filter(\n Q(date_from__gt=cutoff) | Q(date_to__gt=cutoff),\n event__organizer=self.request.organizer,\n event__is_public=True,\n event__live=True,\n is_public=True,\n active=True,\n event__sales_channels__contains=self.request.sales_channel.identifier\n ),\n request\n ).prefetch_related(\n Prefetch(\n 'event',\n queryset=Event.objects.prefetch_related(\n '_settings_objects',\n Prefetch(\n 'organizer',\n queryset=Organizer.objects.prefetch_related('_settings_objects')\n )\n )\n )\n ).order_by(\n 'date_from'\n )\n )\n\n if 'locale' in request.GET and 
request.GET.get('locale') in dict(settings.LANGUAGES):\n with language(request.GET.get('locale'), self.request.organizer.settings.region):\n cal = get_public_ical(events)\n else:\n cal = get_public_ical(events)\n\n resp = HttpResponse(cal.serialize(), content_type='text/calendar')\n resp['Content-Disposition'] = 'attachment; filename=\"{}.ics\"'.format(\n request.organizer.slug\n )\n if request.organizer.settings.meta_noindex:\n resp['X-Robots-Tag'] = 'noindex'\n return resp\n",
"path": "src/pretix/presale/views/organizer.py"
}
] | diff --git a/src/pretix/presale/views/organizer.py b/src/pretix/presale/views/organizer.py
index af56eb5f808..1a5d96ee9ae 100644
--- a/src/pretix/presale/views/organizer.py
+++ b/src/pretix/presale/views/organizer.py
@@ -225,7 +225,7 @@ def _set_month_to_next_event(self):
def _set_month_year(self):
if 'date' in self.request.GET:
try:
- date = dateutil.parser.parse(self.request.GET.get('date')).date()
+ date = dateutil.parser.isoparse(self.request.GET.get('date')).date()
except ValueError:
date = now().date()
self.year = date.year
diff --git a/src/tests/presale/test_event.py b/src/tests/presale/test_event.py
index 89a46658b61..d3e99a20f9b 100644
--- a/src/tests/presale/test_event.py
+++ b/src/tests/presale/test_event.py
@@ -314,7 +314,7 @@ def test_subevent_calendar(self):
resp = self.client.get('/%s/%s/' % (self.orga.slug, self.event.slug))
self.assertIn("Foo SE2", resp.rendered_content)
self.assertNotIn("Foo SE1", resp.rendered_content)
- resp = self.client.get('/%s/%s/?date=%d-%d' % (self.orga.slug, self.event.slug, se1.date_from.year, se1.date_from.month))
+ resp = self.client.get('/%s/%s/?date=%d-%02d' % (self.orga.slug, self.event.slug, se1.date_from.year, se1.date_from.month))
self.assertIn("Foo SE1", resp.rendered_content)
self.assertNotIn("Foo SE2", resp.rendered_content)
| Cannot change month in the widget
In the current version it is not possible to change the month in the widget. When you select a different month, the widget reloads but nothing changes. The cause seems to be that the call https://XXXXX/widget/product_list?lang=es&year=2022&month=03 always returns the same result regardless of the value of the month parameter.
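For context, a minimal sketch (illustrative, not taken from the PR) of the parser swap the diff makes in `_set_month_year`: `dateutil.parser.isoparse` only accepts ISO 8601 input, so a `?date=YYYY-MM` value is either interpreted exactly as requested or rejected with `ValueError`, in which case the view falls back to the current month. The lenient `dateutil.parser.parse`, by contrast, guesses at many formats and fills unspecified fields from a default date, so malformed input can silently resolve to an unintended date instead of triggering the fallback.

```python
# Illustrative sketch of strict vs. lenient date parsing with dateutil.
from dateutil import parser

parser.isoparse("2022-03")        # datetime(2022, 3, 1, 0, 0) -- unambiguous ISO "YYYY-MM"

try:
    parser.isoparse("03/2022")    # not ISO 8601 -> ValueError
except ValueError:
    pass                          # the view falls back to the current month here

# parser.parse() would accept many more spellings and fill missing components
# (e.g. the day) from a default date, which is today unless overridden.
```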
|
getsentry__sentry-21581 | [
{
"content": "from __future__ import absolute_import\n\nfrom uuid import uuid4\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.db import transaction\nfrom django.utils import timezone\n\nfrom sentry.constants import ObjectStatus\nfrom sentry.exceptions import DeleteAborted\nfrom sentry.signals import pending_delete\nfrom sentry.tasks.base import instrumented_task, retry, track_group_async_operation\n\n# in prod we run with infinite retries to recover from errors\n# in debug/development, we assume these tasks generally shouldn't fail\nMAX_RETRIES = 1 if settings.DEBUG else None\nMAX_RETRIES = 1\n\n\n@instrumented_task(name=\"sentry.tasks.deletion.run_scheduled_deletions\", queue=\"cleanup\")\ndef run_scheduled_deletions():\n from sentry.models import ScheduledDeletion\n\n queryset = ScheduledDeletion.objects.filter(\n in_progress=False, aborted=False, date_scheduled__lte=timezone.now()\n )\n for item in queryset:\n with transaction.atomic():\n affected = ScheduledDeletion.objects.filter(\n id=item.id, in_progress=False, aborted=False\n ).update(in_progress=True)\n if not affected:\n continue\n\n run_deletion.delay(deletion_id=item.id)\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.run_deletion\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef run_deletion(deletion_id):\n from sentry import deletions\n from sentry.models import ScheduledDeletion\n\n try:\n deletion = ScheduledDeletion.objects.get(id=deletion_id)\n except ScheduledDeletion.DoesNotExist:\n return\n\n if deletion.aborted:\n raise DeleteAborted\n\n if not deletion.in_progress:\n actor = deletion.get_actor()\n instance = deletion.get_instance()\n with transaction.atomic():\n deletion.update(in_progress=True)\n pending_delete.send(sender=type(instance), instance=instance, actor=actor)\n\n task = deletions.get(\n model=deletion.get_model(),\n query={\"id\": deletion.object_id},\n transaction_id=deletion.guid,\n actor_id=deletion.actor_id,\n )\n has_more = task.chunk()\n if has_more:\n run_deletion.apply_async(kwargs={\"deletion_id\": deletion_id}, countdown=15)\n deletion.delete()\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.revoke_api_tokens\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef revoke_api_tokens(object_id, transaction_id=None, timestamp=None, **kwargs):\n from sentry.models import ApiToken\n\n queryset = ApiToken.objects.filter(application=object_id)\n if timestamp:\n queryset = queryset.filter(date_added__lte=timestamp)\n\n # we're using a slow deletion strategy to avoid a lot of custom code for\n # postgres\n has_more = False\n for obj in queryset[:1000]:\n obj.delete()\n has_more = True\n\n if has_more:\n revoke_api_tokens.apply_async(\n kwargs={\n \"object_id\": object_id,\n \"transaction_id\": transaction_id,\n \"timestamp\": timestamp,\n },\n countdown=15,\n )\n return has_more\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_organization\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_organization(object_id, transaction_id=None, actor_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import Organization, OrganizationStatus\n\n try:\n instance = Organization.objects.get(id=object_id)\n except Organization.DoesNotExist:\n return\n\n if instance.status == OrganizationStatus.VISIBLE:\n raise 
DeleteAborted\n\n # compat: can be removed after we switch to scheduled deletions\n if instance.status != OrganizationStatus.DELETION_IN_PROGRESS:\n pending_delete.send(sender=type(instance), instance=instance)\n\n task = deletions.get(\n model=Organization,\n query={\"id\": object_id},\n transaction_id=transaction_id or uuid4().hex,\n actor_id=actor_id,\n )\n has_more = task.chunk()\n if has_more:\n delete_organization.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id, \"actor_id\": actor_id},\n countdown=15,\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_team\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_team(object_id, transaction_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import Team, TeamStatus\n\n try:\n instance = Team.objects.get(id=object_id)\n except Team.DoesNotExist:\n return\n\n if instance.status == TeamStatus.VISIBLE:\n raise DeleteAborted\n\n task = deletions.get(\n model=Team, query={\"id\": object_id}, transaction_id=transaction_id or uuid4().hex\n )\n has_more = task.chunk()\n if has_more:\n delete_team.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id}, countdown=15\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_project\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_project(object_id, transaction_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import Project, ProjectStatus\n\n try:\n instance = Project.objects.get(id=object_id)\n except Project.DoesNotExist:\n return\n\n if instance.status == ProjectStatus.VISIBLE:\n raise DeleteAborted\n\n task = deletions.get(\n model=Project, query={\"id\": object_id}, transaction_id=transaction_id or uuid4().hex\n )\n has_more = task.chunk()\n if has_more:\n delete_project.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id}, countdown=15\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_groups\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\n@track_group_async_operation\ndef delete_groups(object_ids, transaction_id=None, eventstream_state=None, **kwargs):\n from sentry import deletions, eventstream\n from sentry.models import Group\n\n transaction_id = transaction_id or uuid4().hex\n\n max_batch_size = 100\n current_batch, rest = object_ids[:max_batch_size], object_ids[max_batch_size:]\n\n task = deletions.get(\n model=Group, query={\"id__in\": current_batch}, transaction_id=transaction_id\n )\n has_more = task.chunk()\n if has_more or rest:\n delete_groups.apply_async(\n kwargs={\n \"object_ids\": object_ids if has_more else rest,\n \"transaction_id\": transaction_id,\n \"eventstream_state\": eventstream_state,\n },\n countdown=15,\n )\n else:\n # all groups have been deleted\n if eventstream_state:\n eventstream.end_delete_groups(eventstream_state)\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_api_application\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_api_application(object_id, transaction_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import ApiApplication, ApiApplicationStatus\n\n try:\n instance = ApiApplication.objects.get(id=object_id)\n except 
ApiApplication.DoesNotExist:\n return\n\n if instance.status == ApiApplicationStatus.active:\n raise DeleteAborted\n\n task = deletions.get(\n model=ApiApplication, query={\"id\": object_id}, transaction_id=transaction_id or uuid4().hex\n )\n has_more = task.chunk()\n if has_more:\n delete_api_application.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id}, countdown=15\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.generic_delete\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef generic_delete(app_label, model_name, object_id, transaction_id=None, actor_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import User\n\n model = apps.get_model(app_label, model_name)\n\n try:\n instance = model.objects.get(id=object_id)\n except model.DoesNotExist:\n return\n\n if instance.status != ObjectStatus.DELETION_IN_PROGRESS:\n pending_delete.send(\n sender=type(instance),\n instance=instance,\n actor=User.objects.get(id=actor_id) if actor_id else None,\n )\n\n if instance.status == ObjectStatus.VISIBLE:\n raise DeleteAborted\n\n task = deletions.get(\n model=model,\n actor_id=actor_id,\n query={\"id\": object_id},\n transaction_id=transaction_id or uuid4().hex,\n )\n has_more = task.chunk()\n if has_more:\n generic_delete.apply_async(\n kwargs={\n \"app_label\": app_label,\n \"model_name\": model_name,\n \"object_id\": object_id,\n \"transaction_id\": transaction_id,\n \"actor_id\": actor_id,\n },\n countdown=15,\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_repository\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_repository(object_id, transaction_id=None, actor_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import Repository, User\n\n try:\n instance = Repository.objects.get(id=object_id)\n except Repository.DoesNotExist:\n return\n\n if instance.status == ObjectStatus.VISIBLE:\n raise DeleteAborted\n\n # compat: can be removed after we switch to scheduled deletions\n if instance.status != ObjectStatus.DELETION_IN_PROGRESS:\n pending_delete.send(\n sender=type(instance),\n instance=instance,\n actor=User.objects.get(id=actor_id) if actor_id else None,\n )\n\n task = deletions.get(\n model=Repository,\n actor_id=actor_id,\n query={\"id\": object_id},\n transaction_id=transaction_id or uuid4().hex,\n )\n has_more = task.chunk()\n if has_more:\n delete_repository.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id, \"actor_id\": actor_id},\n countdown=15,\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_organization_integration\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_organization_integration(object_id, transaction_id=None, actor_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import OrganizationIntegration, Repository\n\n try:\n instance = OrganizationIntegration.objects.get(id=object_id)\n except OrganizationIntegration.DoesNotExist:\n return\n\n if instance.status == ObjectStatus.VISIBLE:\n raise DeleteAborted\n\n # dissociate repos from that integration\n Repository.objects.filter(\n organization_id=instance.organization_id, integration_id=instance.integration_id\n ).update(integration_id=None)\n\n task = deletions.get(\n model=OrganizationIntegration,\n 
actor_id=actor_id,\n query={\"id\": object_id},\n transaction_id=transaction_id or uuid4().hex,\n )\n has_more = task.chunk()\n if has_more:\n delete_organization_integration.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id, \"actor_id\": actor_id},\n countdown=15,\n )\n",
"path": "src/sentry/tasks/deletion.py"
}
] | [
{
"content": "from __future__ import absolute_import\n\nfrom uuid import uuid4\n\nfrom django.apps import apps\nfrom django.conf import settings\nfrom django.db import transaction\nfrom django.utils import timezone\n\nfrom sentry.constants import ObjectStatus\nfrom sentry.exceptions import DeleteAborted\nfrom sentry.signals import pending_delete\nfrom sentry.tasks.base import instrumented_task, retry, track_group_async_operation\n\n# in prod we run with infinite retries to recover from errors\n# in debug/development, we assume these tasks generally shouldn't fail\nMAX_RETRIES = 1 if settings.DEBUG else 5\n\n\n@instrumented_task(name=\"sentry.tasks.deletion.run_scheduled_deletions\", queue=\"cleanup\")\ndef run_scheduled_deletions():\n from sentry.models import ScheduledDeletion\n\n queryset = ScheduledDeletion.objects.filter(\n in_progress=False, aborted=False, date_scheduled__lte=timezone.now()\n )\n for item in queryset:\n with transaction.atomic():\n affected = ScheduledDeletion.objects.filter(\n id=item.id, in_progress=False, aborted=False\n ).update(in_progress=True)\n if not affected:\n continue\n\n run_deletion.delay(deletion_id=item.id)\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.run_deletion\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef run_deletion(deletion_id):\n from sentry import deletions\n from sentry.models import ScheduledDeletion\n\n try:\n deletion = ScheduledDeletion.objects.get(id=deletion_id)\n except ScheduledDeletion.DoesNotExist:\n return\n\n if deletion.aborted:\n raise DeleteAborted\n\n if not deletion.in_progress:\n actor = deletion.get_actor()\n instance = deletion.get_instance()\n with transaction.atomic():\n deletion.update(in_progress=True)\n pending_delete.send(sender=type(instance), instance=instance, actor=actor)\n\n task = deletions.get(\n model=deletion.get_model(),\n query={\"id\": deletion.object_id},\n transaction_id=deletion.guid,\n actor_id=deletion.actor_id,\n )\n has_more = task.chunk()\n if has_more:\n run_deletion.apply_async(kwargs={\"deletion_id\": deletion_id}, countdown=15)\n deletion.delete()\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.revoke_api_tokens\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef revoke_api_tokens(object_id, transaction_id=None, timestamp=None, **kwargs):\n from sentry.models import ApiToken\n\n queryset = ApiToken.objects.filter(application=object_id)\n if timestamp:\n queryset = queryset.filter(date_added__lte=timestamp)\n\n # we're using a slow deletion strategy to avoid a lot of custom code for\n # postgres\n has_more = False\n for obj in queryset[:1000]:\n obj.delete()\n has_more = True\n\n if has_more:\n revoke_api_tokens.apply_async(\n kwargs={\n \"object_id\": object_id,\n \"transaction_id\": transaction_id,\n \"timestamp\": timestamp,\n },\n countdown=15,\n )\n return has_more\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_organization\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_organization(object_id, transaction_id=None, actor_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import Organization, OrganizationStatus\n\n try:\n instance = Organization.objects.get(id=object_id)\n except Organization.DoesNotExist:\n return\n\n if instance.status == OrganizationStatus.VISIBLE:\n raise DeleteAborted\n\n # compat: can be 
removed after we switch to scheduled deletions\n if instance.status != OrganizationStatus.DELETION_IN_PROGRESS:\n pending_delete.send(sender=type(instance), instance=instance)\n\n task = deletions.get(\n model=Organization,\n query={\"id\": object_id},\n transaction_id=transaction_id or uuid4().hex,\n actor_id=actor_id,\n )\n has_more = task.chunk()\n if has_more:\n delete_organization.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id, \"actor_id\": actor_id},\n countdown=15,\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_team\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_team(object_id, transaction_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import Team, TeamStatus\n\n try:\n instance = Team.objects.get(id=object_id)\n except Team.DoesNotExist:\n return\n\n if instance.status == TeamStatus.VISIBLE:\n raise DeleteAborted\n\n task = deletions.get(\n model=Team, query={\"id\": object_id}, transaction_id=transaction_id or uuid4().hex\n )\n has_more = task.chunk()\n if has_more:\n delete_team.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id}, countdown=15\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_project\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_project(object_id, transaction_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import Project, ProjectStatus\n\n try:\n instance = Project.objects.get(id=object_id)\n except Project.DoesNotExist:\n return\n\n if instance.status == ProjectStatus.VISIBLE:\n raise DeleteAborted\n\n task = deletions.get(\n model=Project, query={\"id\": object_id}, transaction_id=transaction_id or uuid4().hex\n )\n has_more = task.chunk()\n if has_more:\n delete_project.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id}, countdown=15\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_groups\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\n@track_group_async_operation\ndef delete_groups(object_ids, transaction_id=None, eventstream_state=None, **kwargs):\n from sentry import deletions, eventstream\n from sentry.models import Group\n\n transaction_id = transaction_id or uuid4().hex\n\n max_batch_size = 100\n current_batch, rest = object_ids[:max_batch_size], object_ids[max_batch_size:]\n\n task = deletions.get(\n model=Group, query={\"id__in\": current_batch}, transaction_id=transaction_id\n )\n has_more = task.chunk()\n if has_more or rest:\n delete_groups.apply_async(\n kwargs={\n \"object_ids\": object_ids if has_more else rest,\n \"transaction_id\": transaction_id,\n \"eventstream_state\": eventstream_state,\n },\n countdown=15,\n )\n else:\n # all groups have been deleted\n if eventstream_state:\n eventstream.end_delete_groups(eventstream_state)\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_api_application\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_api_application(object_id, transaction_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import ApiApplication, ApiApplicationStatus\n\n try:\n instance = ApiApplication.objects.get(id=object_id)\n except ApiApplication.DoesNotExist:\n return\n\n if 
instance.status == ApiApplicationStatus.active:\n raise DeleteAborted\n\n task = deletions.get(\n model=ApiApplication, query={\"id\": object_id}, transaction_id=transaction_id or uuid4().hex\n )\n has_more = task.chunk()\n if has_more:\n delete_api_application.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id}, countdown=15\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.generic_delete\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef generic_delete(app_label, model_name, object_id, transaction_id=None, actor_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import User\n\n model = apps.get_model(app_label, model_name)\n\n try:\n instance = model.objects.get(id=object_id)\n except model.DoesNotExist:\n return\n\n if instance.status != ObjectStatus.DELETION_IN_PROGRESS:\n pending_delete.send(\n sender=type(instance),\n instance=instance,\n actor=User.objects.get(id=actor_id) if actor_id else None,\n )\n\n if instance.status == ObjectStatus.VISIBLE:\n raise DeleteAborted\n\n task = deletions.get(\n model=model,\n actor_id=actor_id,\n query={\"id\": object_id},\n transaction_id=transaction_id or uuid4().hex,\n )\n has_more = task.chunk()\n if has_more:\n generic_delete.apply_async(\n kwargs={\n \"app_label\": app_label,\n \"model_name\": model_name,\n \"object_id\": object_id,\n \"transaction_id\": transaction_id,\n \"actor_id\": actor_id,\n },\n countdown=15,\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_repository\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_repository(object_id, transaction_id=None, actor_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import Repository, User\n\n try:\n instance = Repository.objects.get(id=object_id)\n except Repository.DoesNotExist:\n return\n\n if instance.status == ObjectStatus.VISIBLE:\n raise DeleteAborted\n\n # compat: can be removed after we switch to scheduled deletions\n if instance.status != ObjectStatus.DELETION_IN_PROGRESS:\n pending_delete.send(\n sender=type(instance),\n instance=instance,\n actor=User.objects.get(id=actor_id) if actor_id else None,\n )\n\n task = deletions.get(\n model=Repository,\n actor_id=actor_id,\n query={\"id\": object_id},\n transaction_id=transaction_id or uuid4().hex,\n )\n has_more = task.chunk()\n if has_more:\n delete_repository.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id, \"actor_id\": actor_id},\n countdown=15,\n )\n\n\n@instrumented_task(\n name=\"sentry.tasks.deletion.delete_organization_integration\",\n queue=\"cleanup\",\n default_retry_delay=60 * 5,\n max_retries=MAX_RETRIES,\n)\n@retry(exclude=(DeleteAborted,))\ndef delete_organization_integration(object_id, transaction_id=None, actor_id=None, **kwargs):\n from sentry import deletions\n from sentry.models import OrganizationIntegration, Repository\n\n try:\n instance = OrganizationIntegration.objects.get(id=object_id)\n except OrganizationIntegration.DoesNotExist:\n return\n\n if instance.status == ObjectStatus.VISIBLE:\n raise DeleteAborted\n\n # dissociate repos from that integration\n Repository.objects.filter(\n organization_id=instance.organization_id, integration_id=instance.integration_id\n ).update(integration_id=None)\n\n task = deletions.get(\n model=OrganizationIntegration,\n actor_id=actor_id,\n query={\"id\": object_id},\n 
transaction_id=transaction_id or uuid4().hex,\n )\n has_more = task.chunk()\n if has_more:\n delete_organization_integration.apply_async(\n kwargs={\"object_id\": object_id, \"transaction_id\": transaction_id, \"actor_id\": actor_id},\n countdown=15,\n )\n",
"path": "src/sentry/tasks/deletion.py"
}
] | diff --git a/src/sentry/tasks/deletion.py b/src/sentry/tasks/deletion.py
index 47b44cdd4ee3c8..3678f5d0c7e53f 100644
--- a/src/sentry/tasks/deletion.py
+++ b/src/sentry/tasks/deletion.py
@@ -14,8 +14,7 @@
# in prod we run with infinite retries to recover from errors
# in debug/development, we assume these tasks generally shouldn't fail
-MAX_RETRIES = 1 if settings.DEBUG else None
-MAX_RETRIES = 1
+MAX_RETRIES = 1 if settings.DEBUG else 5
@instrumented_task(name="sentry.tasks.deletion.run_scheduled_deletions", queue="cleanup")
| Hardcoded MAX_RETRIES = 1
https://github.com/getsentry/sentry/blob/master/src/sentry/tasks/deletion.py#L18
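For reference, a minimal sketch of how Celery interprets these task options (plain Celery shown, not Sentry's `instrumented_task` wrapper; the helper function and exception type below are hypothetical): `max_retries=None` means the task retries forever, while an integer caps the number of retries, so the stray `MAX_RETRIES = 1` limited every deletion task to a single retry regardless of `settings.DEBUG`.

```python
# Illustrative only -- plain Celery, not Sentry's instrumented_task wrapper.
from celery import shared_task


class TransientDatabaseError(Exception):
    """Stand-in for whatever transient failure the real task might hit."""


def delete_some_rows(object_id):
    """Hypothetical helper standing in for the real deletion work."""


@shared_task(bind=True, default_retry_delay=60 * 5, max_retries=5)
def cleanup_chunk(self, object_id):
    # max_retries=None would mean "retry forever"; an integer caps the retries.
    try:
        delete_some_rows(object_id)
    except TransientDatabaseError as exc:
        raise self.retry(exc=exc)
```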
|
oppia__oppia-1713 | [
{
"content": "# coding: utf-8\n#\n# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, softwar\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Rules for CodeEvaluation objects.\"\"\"\n\nfrom extensions.rules import base\n\n\nclass CodeEquals(base.CodeEvaluationRule):\n description = 'has code equal to {{x|CodeString}}'\n\n\nclass CodeContains(base.CodeEvaluationRule):\n description = 'has code that contains {{x|CodeString}}'\n\n\nclass CodeDoesNotContain(base.CodeEvaluationRule):\n description = 'has code that does not contain {{x|CodeString}}'\n\n\nclass OutputEquals(base.CodeEvaluationRule):\n description = 'has output equal to {{x|CodeString}}'\n\n\nclass ResultsInError(base.CodeEvaluationRule):\n description = 'results in an error when run'\n\n\nclass ErrorContains(base.CodeEvaluationRule):\n description = (\n 'has error message that contains {{x|UnicodeString}}')\n",
"path": "extensions/rules/code_evaluation.py"
}
] | [
{
"content": "# coding: utf-8\n#\n# Copyright 2014 The Oppia Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, softwar\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Rules for CodeEvaluation objects.\"\"\"\n\nfrom extensions.rules import base\n\n\nclass CodeEquals(base.CodeEvaluationRule):\n description = 'has code equal to {{x|CodeString}}'\n\n\nclass CodeContains(base.CodeEvaluationRule):\n description = 'has code that contains {{x|CodeString}}'\n\n\nclass CodeDoesNotContain(base.CodeEvaluationRule):\n description = 'has code that does not contain {{x|CodeString}}'\n\nclass OutputContains(base.CodeEvaluationRule):\n description = 'has output that contains {{x|CodeString}}'\n\nclass OutputEquals(base.CodeEvaluationRule):\n description = 'has output equal to {{x|CodeString}}'\n\n\nclass ResultsInError(base.CodeEvaluationRule):\n description = 'results in an error when run'\n\n\nclass ErrorContains(base.CodeEvaluationRule):\n description = (\n 'has error message that contains {{x|UnicodeString}}')\n",
"path": "extensions/rules/code_evaluation.py"
}
] | diff --git a/core/domain/rule_domain_test.py b/core/domain/rule_domain_test.py
index 5afcf9a036f7d..ced4e9429bd7f 100644
--- a/core/domain/rule_domain_test.py
+++ b/core/domain/rule_domain_test.py
@@ -26,7 +26,7 @@
import feconf
-EXPECTED_TOTAL_NUMBER_OF_RULES = 47
+EXPECTED_TOTAL_NUMBER_OF_RULES = 48
class FakeRule(rule_domain.Rule):
diff --git a/extensions/interactions/CodeRepl/CodeRepl.js b/extensions/interactions/CodeRepl/CodeRepl.js
index fc5b8420d9bd6..d88f035b07ecf 100644
--- a/extensions/interactions/CodeRepl/CodeRepl.js
+++ b/extensions/interactions/CodeRepl/CodeRepl.js
@@ -268,6 +268,11 @@ oppia.factory('codeReplRulesService', [
codeNormalizationService.getNormalizedCode(inputs.x);
return normalizedCode.indexOf(normalizedSnippet) == -1;
},
+ OutputContains: function(answer, inputs) {
+ var normalizedOutput = $filter('normalizeWhitespace')(answer.output);
+ var normalizedSnippet = $filter('normalizeWhitespace')(inputs.x);
+ return normalizedOutput.indexOf(normalizedSnippet) != -1;
+ },
OutputEquals: function(answer, inputs) {
var normalizedOutput = $filter('normalizeWhitespace')(answer.output);
var normalizedExpectedOutput =
diff --git a/extensions/interactions/CodeRepl/CodeReplRulesServiceSpec.js b/extensions/interactions/CodeRepl/CodeReplRulesServiceSpec.js
index c0c81a08d1323..082955c3b25c7 100644
--- a/extensions/interactions/CodeRepl/CodeReplRulesServiceSpec.js
+++ b/extensions/interactions/CodeRepl/CodeReplRulesServiceSpec.js
@@ -148,6 +148,65 @@ describe('Code REPL rules service', function() {
});
});
+ describe('\'output contains\' rule', function() {
+ var RULE_INPUT = {
+ x: '1'
+ };
+
+ var RULE_INPUT_1 = {
+ x: 'a b c'
+ };
+
+ var RULE_INPUT_2 = {
+ x: 'a\nb\nc'
+ };
+
+ it('should check if output contains some content', function() {
+ expect(crrs.OutputContains({
+ output: '1 2 3 4'
+ }, RULE_INPUT)).toBe(true);
+ expect(crrs.OutputContains({
+ output: '\n1\n2\n3\n4\n'
+ }, RULE_INPUT)).toBe(true);
+ expect(crrs.OutputContains({
+ output: ''
+ }, RULE_INPUT)).toBe(false);
+ expect(crrs.OutputContains({
+ output: 'bad output'
+ }, RULE_INPUT)).toBe(false);
+ expect(crrs.OutputContains({
+ output: 'a b c d e'
+ }, RULE_INPUT_1)).toBe(true);
+ expect(crrs.OutputContains({
+ output: 'a\nb\nc\nd\n'
+ }, RULE_INPUT_1)).toBe(false);
+ expect(crrs.OutputContains({
+ output: 'ab\nc\n'
+ }, RULE_INPUT_1)).toBe(false);
+ expect(crrs.OutputContains({
+ output: ''
+ }, RULE_INPUT_1)).toBe(false);
+ expect(crrs.OutputContains({
+ output: 'bad output'
+ }, RULE_INPUT_1)).toBe(false);
+ expect(crrs.OutputContains({
+ output: 'a\nb\nc\nd\ne'
+ }, RULE_INPUT_2)).toBe(true);
+ expect(crrs.OutputContains({
+ output: '\nabc\ndef\nfgh\n'
+ }, RULE_INPUT_2)).toBe(false);
+ expect(crrs.OutputContains({
+ output: 'a b c'
+ }, RULE_INPUT_2)).toBe(false);
+ expect(crrs.OutputContains({
+ output: ''
+ }, RULE_INPUT_2)).toBe(false);
+ expect(crrs.OutputContains({
+ output: 'bad output'
+ }, RULE_INPUT_2)).toBe(false);
+ });
+ });
+
describe('\'output equals\' rule', function() {
var RULE_INPUT = {
x: '1'
diff --git a/extensions/rules/code_evaluation.py b/extensions/rules/code_evaluation.py
index 829a9a97c2a6e..0055a7c02526a 100644
--- a/extensions/rules/code_evaluation.py
+++ b/extensions/rules/code_evaluation.py
@@ -30,6 +30,8 @@ class CodeContains(base.CodeEvaluationRule):
class CodeDoesNotContain(base.CodeEvaluationRule):
description = 'has code that does not contain {{x|CodeString}}'
+class OutputContains(base.CodeEvaluationRule):
+ description = 'has output that contains {{x|CodeString}}'
class OutputEquals(base.CodeEvaluationRule):
description = 'has output equal to {{x|CodeString}}'
| Add an OutputContains rule to the CodeRepl interaction.
We've had a request to add an OutputContains rule to the CodeRepl interaction.
The use case is as follows: the student types in the body of a function, and their code is checked by calling the function on several inputs and printing the results. We don't want to stop the student from printing their own output from the function first, though, hence the idea of checking whether the expected output appears as a substring of the student's output.
Note that this is a straightforward starter project. The files to modify are extensions/interactions/CodeRepl/CodeRepl.js (see codeReplRulesService) and the corresponding test suite in extensions/interactions/CodeRepl/CodeReplRulesServiceSpec.js.
/cc @anuzis
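A rough Python sketch of the requested check (the real rule is evaluated client-side in `codeReplRulesService`; the whitespace normalization below is only an assumed approximation of the Angular `normalizeWhitespace` filter):

```python
# Sketch of an OutputContains-style check. normalize() is an assumed stand-in
# for the 'normalizeWhitespace' filter used by the JS rule implementation.
import re


def normalize(text):
    # collapse runs of two or more whitespace characters and trim the ends
    return re.sub(r'\s{2,}', ' ', text).strip()


def output_contains(answer_output, expected_snippet):
    return normalize(expected_snippet) in normalize(answer_output)


assert output_contains('1 2 3 4', '1')
assert output_contains('\n1\n2\n3\n4\n', '1')
assert not output_contains('bad output', '1')
```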
|
optuna__optuna-56 | [
{
"content": "import os\nfrom setuptools import find_packages\nfrom setuptools import setup\nimport sys\n\n\ndef get_version():\n version_filepath = os.path.join(os.path.dirname(__file__), 'pfnopt', 'version.py')\n with open(version_filepath) as f:\n for line in f:\n if line.startswith('__version__'):\n return line.strip().split()[-1][1:-1]\n assert False\n\n\ntests_require = ['pytest', 'hacking', 'mock']\nif sys.version_info[0] == 3:\n tests_require.append('mypy')\n\n\nsetup(\n name='pfnopt',\n version=get_version(),\n description='',\n author='Takuya Akiba',\n author_email='[email protected]',\n packages=find_packages(),\n install_requires=['sqlalchemy', 'numpy', 'scipy', 'six', 'typing', 'enum34', 'cliff'],\n tests_require=tests_require,\n extras_require={'testing': tests_require},\n entry_points={\n 'console_scripts': ['pfnopt = pfnopt.cli:main'],\n 'pfnopt.command': ['mkstudy = pfnopt.cli:MakeStudy']\n }\n)\n",
"path": "setup.py"
}
] | [
{
"content": "import os\nfrom setuptools import find_packages\nfrom setuptools import setup\nimport sys\n\n\ndef get_version():\n version_filepath = os.path.join(os.path.dirname(__file__), 'pfnopt', 'version.py')\n with open(version_filepath) as f:\n for line in f:\n if line.startswith('__version__'):\n return line.strip().split()[-1][1:-1]\n assert False\n\n\ntests_require = ['pytest', 'hacking', 'mock']\nif sys.version_info[0] == 3:\n tests_require.append('mypy')\n\n\nsetup(\n name='pfnopt',\n version=get_version(),\n description='',\n author='Takuya Akiba',\n author_email='[email protected]',\n packages=find_packages(),\n install_requires=['sqlalchemy>=1.1.0', 'numpy', 'scipy', 'six', 'typing', 'enum34', 'cliff'],\n tests_require=tests_require,\n extras_require={'testing': tests_require},\n entry_points={\n 'console_scripts': ['pfnopt = pfnopt.cli:main'],\n 'pfnopt.command': ['mkstudy = pfnopt.cli:MakeStudy']\n }\n)\n",
"path": "setup.py"
}
] | diff --git a/setup.py b/setup.py
index 37a5afdda7..909c5da927 100644
--- a/setup.py
+++ b/setup.py
@@ -25,7 +25,7 @@ def get_version():
author='Takuya Akiba',
author_email='[email protected]',
packages=find_packages(),
- install_requires=['sqlalchemy', 'numpy', 'scipy', 'six', 'typing', 'enum34', 'cliff'],
+ install_requires=['sqlalchemy>=1.1.0', 'numpy', 'scipy', 'six', 'typing', 'enum34', 'cliff'],
tests_require=tests_require,
extras_require={'testing': tests_require},
entry_points={
| Incompatibility with old versions of SQLAlchemy.
Connecting to PostgreSQL fails with old versions of SQLAlchemy, raising an error: `sqlalchemy.exc.CompileError: Postgresql ENUM type requires a name`. This error is resolved once the SQLAlchemy version is updated.
For example:
```python
>>> import sqlalchemy
>>> sqlalchemy.__version__
'1.0.13'
>>> from pfnopt.storages import RDBStorage
>>> RDBStorage(url='postgresql://pfnopt:somepassword@localhost:5432/some_db')
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/Users/sano/PycharmProjects/pfnopt/pfnopt/storages/rdb.py", line 85, in __init__
Base.metadata.create_all(self.engine)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/schema.py", line 3695, in create_all
tables=tables)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/engine/base.py", line 1856, in _run_visitor
conn._run_visitor(visitorcallable, element, **kwargs)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/engine/base.py", line 1481, in _run_visitor
**kwargs).traverse_single(element)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/visitors.py", line 121, in traverse_single
return meth(obj, **kw)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/ddl.py", line 720, in visit_metadata
_ddl_runner=self)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/event/attr.py", line 256, in __call__
fn(*args, **kw)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/util/langhelpers.py", line 546, in __call__
return getattr(self.target, self.name)(*arg, **kw)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/sqltypes.py", line 1040, in _on_metadata_create
t._on_metadata_create(target, bind, **kw)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1379, in _on_metadata_create
self.create(bind=bind, checkfirst=checkfirst)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1317, in create
bind.execute(CreateEnumType(self))
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/engine/base.py", line 914, in execute
return meth(self, multiparams, params)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/ddl.py", line 68, in _execute_on_connection
return connection._execute_ddl(self, multiparams, params)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/engine/base.py", line 962, in _execute_ddl
compiled = ddl.compile(dialect=dialect)
File "<string>", line 1, in <lambda>
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/elements.py", line 494, in compile
return self._compiler(dialect, bind=bind, **kw)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/ddl.py", line 26, in _compiler
return dialect.ddl_compiler(dialect, self, **kw)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/compiler.py", line 190, in __init__
self.string = self.process(self.statement, **compile_kwargs)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/compiler.py", line 213, in process
return obj._compiler_dispatch(self, **kwargs)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/sql/visitors.py", line 81, in _compiler_dispatch
return meth(self, **kw)
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1613, in visit_create_enum_type
self.preparer.format_type(type_),
File "/Users/sano/anaconda3/envs/pfnopt-35/lib/python3.5/site-packages/sqlalchemy/dialects/postgresql/base.py", line 1857, in format_type
raise exc.CompileError("Postgresql ENUM type requires a name.")
sqlalchemy.exc.CompileError: Postgresql ENUM type requires a name.
```
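Not part of the original report: a hedged sketch of an import-time guard that would surface the requirement directly instead of the opaque `CompileError` above. The PR itself simply pins `sqlalchemy>=1.1.0` in `setup.py`; the function and its error message are illustrative only, and the version parsing assumes a plain numeric version string such as `'1.0.13'`.

```python
import sqlalchemy


def check_sqlalchemy_version(minimum=(1, 1, 0)):
    # Compare the first three numeric components of sqlalchemy.__version__.
    parts = [int(p) for p in sqlalchemy.__version__.split('.')[:3]]
    installed = tuple(parts + [0] * (3 - len(parts)))
    if installed < minimum:
        raise RuntimeError(
            'pfnopt requires SQLAlchemy >= {}, but {} is installed.'.format(
                '.'.join(str(p) for p in minimum), sqlalchemy.__version__))


check_sqlalchemy_version()  # raises for 1.0.13, passes for 1.1.0 and newer
```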
|
freedomofpress__securedrop-4391 | [
{
"content": "import operator\nimport os\nimport io\nimport six\n\nfrom datetime import datetime\nfrom flask import (Blueprint, render_template, flash, redirect, url_for, g,\n session, current_app, request, Markup, abort)\nfrom flask_babel import gettext\nfrom sqlalchemy.exc import IntegrityError\n\nimport store\n\nfrom db import db\nfrom models import Source, Submission, Reply, get_one_or_else\nfrom source_app.decorators import login_required\nfrom source_app.utils import (logged_in, generate_unique_codename,\n async_genkey, normalize_timestamps,\n valid_codename, get_entropy_estimate)\nfrom source_app.forms import LoginForm\n\n\ndef make_blueprint(config):\n view = Blueprint('main', __name__)\n\n @view.route('/')\n def index():\n return render_template('index.html')\n\n @view.route('/generate', methods=('GET', 'POST'))\n def generate():\n if logged_in():\n flash(gettext(\n \"You were redirected because you are already logged in. \"\n \"If you want to create a new account, you should log out \"\n \"first.\"),\n \"notification\")\n return redirect(url_for('.lookup'))\n\n codename = generate_unique_codename(config)\n session['codename'] = codename\n session['new_user'] = True\n return render_template('generate.html', codename=codename)\n\n @view.route('/org-logo')\n def select_logo():\n if os.path.exists(os.path.join(current_app.static_folder, 'i',\n 'custom_logo.png')):\n return redirect(url_for('static', filename='i/custom_logo.png'))\n else:\n return redirect(url_for('static', filename='i/logo.png'))\n\n @view.route('/create', methods=['POST'])\n def create():\n filesystem_id = current_app.crypto_util.hash_codename(\n session['codename'])\n\n source = Source(filesystem_id, current_app.crypto_util.display_id())\n db.session.add(source)\n try:\n db.session.commit()\n except IntegrityError as e:\n db.session.rollback()\n current_app.logger.error(\n \"Attempt to create a source with duplicate codename: %s\" %\n (e,))\n\n # Issue 2386: don't log in on duplicates\n del session['codename']\n abort(500)\n else:\n os.mkdir(current_app.storage.path(filesystem_id))\n\n session['logged_in'] = True\n return redirect(url_for('.lookup'))\n\n @view.route('/lookup', methods=('GET',))\n @login_required\n def lookup():\n replies = []\n source_inbox = Reply.query.filter(Reply.source_id == g.source.id) \\\n .filter(Reply.deleted_by_source == False).all() # noqa\n\n for reply in source_inbox:\n reply_path = current_app.storage.path(\n g.filesystem_id,\n reply.filename,\n )\n try:\n with io.open(reply_path, \"rb\") as f:\n contents = f.read()\n reply_obj = current_app.crypto_util.decrypt(g.codename, contents)\n if six.PY2: # Python2\n reply.decrypted = reply_obj.decode('utf-8')\n else:\n reply.decrypted = reply_obj\n except UnicodeDecodeError:\n current_app.logger.error(\"Could not decode reply %s\" %\n reply.filename)\n else:\n reply.date = datetime.utcfromtimestamp(\n os.stat(reply_path).st_mtime)\n replies.append(reply)\n\n # Sort the replies by date\n replies.sort(key=operator.attrgetter('date'), reverse=True)\n\n # Generate a keypair to encrypt replies from the journalist\n # Only do this if the journalist has flagged the source as one\n # that they would like to reply to. 
(Issue #140.)\n if not current_app.crypto_util.getkey(g.filesystem_id) and \\\n g.source.flagged:\n db_uri = current_app.config['SQLALCHEMY_DATABASE_URI']\n async_genkey(current_app.crypto_util,\n db_uri,\n g.filesystem_id,\n g.codename)\n\n return render_template(\n 'lookup.html',\n codename=g.codename,\n replies=replies,\n flagged=g.source.flagged,\n new_user=session.get('new_user', None),\n haskey=current_app.crypto_util.getkey(\n g.filesystem_id))\n\n @view.route('/submit', methods=('POST',))\n @login_required\n def submit():\n msg = request.form['msg']\n fh = None\n if 'fh' in request.files:\n fh = request.files['fh']\n\n # Don't submit anything if it was an \"empty\" submission. #878\n if not (msg or fh):\n flash(gettext(\n \"You must enter a message or choose a file to submit.\"),\n \"error\")\n return redirect(url_for('main.lookup'))\n\n fnames = []\n journalist_filename = g.source.journalist_filename\n first_submission = g.source.interaction_count == 0\n\n if msg:\n g.source.interaction_count += 1\n fnames.append(\n current_app.storage.save_message_submission(\n g.filesystem_id,\n g.source.interaction_count,\n journalist_filename,\n msg))\n if fh:\n g.source.interaction_count += 1\n fnames.append(\n current_app.storage.save_file_submission(\n g.filesystem_id,\n g.source.interaction_count,\n journalist_filename,\n fh.filename,\n fh.stream))\n\n if first_submission:\n msg = render_template('first_submission_flashed_message.html')\n flash(Markup(msg), \"success\")\n\n else:\n if msg and not fh:\n html_contents = gettext('Thanks! We received your message.')\n elif not msg and fh:\n html_contents = gettext('Thanks! We received your document.')\n else:\n html_contents = gettext('Thanks! We received your message and '\n 'document.')\n\n msg = render_template('next_submission_flashed_message.html',\n html_contents=html_contents)\n flash(Markup(msg), \"success\")\n\n new_submissions = []\n for fname in fnames:\n submission = Submission(g.source, fname)\n db.session.add(submission)\n new_submissions.append(submission)\n\n if g.source.pending:\n g.source.pending = False\n\n # Generate a keypair now, if there's enough entropy (issue #303)\n # (gpg reads 300 bytes from /dev/random)\n entropy_avail = get_entropy_estimate()\n if entropy_avail >= 2400:\n db_uri = current_app.config['SQLALCHEMY_DATABASE_URI']\n\n async_genkey(current_app.crypto_util,\n db_uri,\n g.filesystem_id,\n g.codename)\n current_app.logger.info(\"generating key, entropy: {}\".format(\n entropy_avail))\n else:\n current_app.logger.warn(\n \"skipping key generation. 
entropy: {}\".format(\n entropy_avail))\n\n g.source.last_updated = datetime.utcnow()\n db.session.commit()\n\n for sub in new_submissions:\n store.async_add_checksum_for_file(sub)\n\n normalize_timestamps(g.filesystem_id)\n\n return redirect(url_for('main.lookup'))\n\n @view.route('/delete', methods=('POST',))\n @login_required\n def delete():\n \"\"\"This deletes the reply from the source's inbox, but preserves\n the history for journalists such that they can view conversation\n history.\n \"\"\"\n\n query = Reply.query.filter_by(\n filename=request.form['reply_filename'],\n source_id=g.source.id)\n reply = get_one_or_else(query, current_app.logger, abort)\n reply.deleted_by_source = True\n db.session.add(reply)\n db.session.commit()\n\n flash(gettext(\"Reply deleted\"), \"notification\")\n return redirect(url_for('.lookup'))\n\n @view.route('/delete-all', methods=('POST',))\n @login_required\n def batch_delete():\n replies = Reply.query.filter(Reply.source_id == g.source.id) \\\n .filter(Reply.deleted_by_source == False).all() # noqa\n if len(replies) == 0:\n current_app.logger.error(\"Found no replies when at least one was \"\n \"expected\")\n return redirect(url_for('.lookup'))\n\n for reply in replies:\n reply.deleted_by_source = True\n db.session.add(reply)\n db.session.commit()\n\n flash(gettext(\"All replies have been deleted\"), \"notification\")\n return redirect(url_for('.lookup'))\n\n @view.route('/login', methods=('GET', 'POST'))\n def login():\n form = LoginForm()\n if form.validate_on_submit():\n codename = request.form['codename'].strip()\n if valid_codename(codename):\n session.update(codename=codename, logged_in=True)\n return redirect(url_for('.lookup', from_login='1'))\n else:\n current_app.logger.info(\n \"Login failed for invalid codename\")\n flash(gettext(\"Sorry, that is not a recognized codename.\"),\n \"error\")\n return render_template('login.html', form=form)\n\n @view.route('/logout')\n def logout():\n if logged_in():\n msg = render_template('logout_flashed_message.html')\n\n # Clear the session after we render the message so it's localized\n # If a user specified a locale, save it and restore it\n user_locale = g.locale\n session.clear()\n session['locale'] = user_locale\n\n flash(Markup(msg), \"important hide-if-not-tor-browser\")\n return redirect(url_for('.index'))\n\n return view\n",
"path": "securedrop/source_app/main.py"
}
] | [
{
"content": "import operator\nimport os\nimport io\nimport six\n\nfrom datetime import datetime\nfrom flask import (Blueprint, render_template, flash, redirect, url_for, g,\n session, current_app, request, Markup, abort)\nfrom flask_babel import gettext\nfrom sqlalchemy.exc import IntegrityError\n\nimport store\n\nfrom db import db\nfrom models import Source, Submission, Reply, get_one_or_else\nfrom source_app.decorators import login_required\nfrom source_app.utils import (logged_in, generate_unique_codename,\n async_genkey, normalize_timestamps,\n valid_codename, get_entropy_estimate)\nfrom source_app.forms import LoginForm\n\n\ndef make_blueprint(config):\n view = Blueprint('main', __name__)\n\n @view.route('/')\n def index():\n return render_template('index.html')\n\n @view.route('/generate', methods=('GET', 'POST'))\n def generate():\n if logged_in():\n flash(gettext(\n \"You were redirected because you are already logged in. \"\n \"If you want to create a new account, you should log out \"\n \"first.\"),\n \"notification\")\n return redirect(url_for('.lookup'))\n\n codename = generate_unique_codename(config)\n session['codename'] = codename\n session['new_user'] = True\n return render_template('generate.html', codename=codename)\n\n @view.route('/org-logo')\n def select_logo():\n if os.path.exists(os.path.join(current_app.static_folder, 'i',\n 'custom_logo.png')):\n return redirect(url_for('static', filename='i/custom_logo.png'))\n else:\n return redirect(url_for('static', filename='i/logo.png'))\n\n @view.route('/create', methods=['POST'])\n def create():\n filesystem_id = current_app.crypto_util.hash_codename(\n session['codename'])\n\n source = Source(filesystem_id, current_app.crypto_util.display_id())\n db.session.add(source)\n try:\n db.session.commit()\n except IntegrityError as e:\n db.session.rollback()\n current_app.logger.error(\n \"Attempt to create a source with duplicate codename: %s\" %\n (e,))\n\n # Issue 2386: don't log in on duplicates\n del session['codename']\n\n # Issue 4361: Delete 'logged_in' if it's in the session\n try:\n del session['logged_in']\n except KeyError:\n pass\n\n abort(500)\n else:\n os.mkdir(current_app.storage.path(filesystem_id))\n\n session['logged_in'] = True\n return redirect(url_for('.lookup'))\n\n @view.route('/lookup', methods=('GET',))\n @login_required\n def lookup():\n replies = []\n source_inbox = Reply.query.filter(Reply.source_id == g.source.id) \\\n .filter(Reply.deleted_by_source == False).all() # noqa\n\n for reply in source_inbox:\n reply_path = current_app.storage.path(\n g.filesystem_id,\n reply.filename,\n )\n try:\n with io.open(reply_path, \"rb\") as f:\n contents = f.read()\n reply_obj = current_app.crypto_util.decrypt(g.codename, contents)\n if six.PY2: # Python2\n reply.decrypted = reply_obj.decode('utf-8')\n else:\n reply.decrypted = reply_obj\n except UnicodeDecodeError:\n current_app.logger.error(\"Could not decode reply %s\" %\n reply.filename)\n else:\n reply.date = datetime.utcfromtimestamp(\n os.stat(reply_path).st_mtime)\n replies.append(reply)\n\n # Sort the replies by date\n replies.sort(key=operator.attrgetter('date'), reverse=True)\n\n # Generate a keypair to encrypt replies from the journalist\n # Only do this if the journalist has flagged the source as one\n # that they would like to reply to. 
(Issue #140.)\n if not current_app.crypto_util.getkey(g.filesystem_id) and \\\n g.source.flagged:\n db_uri = current_app.config['SQLALCHEMY_DATABASE_URI']\n async_genkey(current_app.crypto_util,\n db_uri,\n g.filesystem_id,\n g.codename)\n\n return render_template(\n 'lookup.html',\n codename=g.codename,\n replies=replies,\n flagged=g.source.flagged,\n new_user=session.get('new_user', None),\n haskey=current_app.crypto_util.getkey(\n g.filesystem_id))\n\n @view.route('/submit', methods=('POST',))\n @login_required\n def submit():\n msg = request.form['msg']\n fh = None\n if 'fh' in request.files:\n fh = request.files['fh']\n\n # Don't submit anything if it was an \"empty\" submission. #878\n if not (msg or fh):\n flash(gettext(\n \"You must enter a message or choose a file to submit.\"),\n \"error\")\n return redirect(url_for('main.lookup'))\n\n fnames = []\n journalist_filename = g.source.journalist_filename\n first_submission = g.source.interaction_count == 0\n\n if msg:\n g.source.interaction_count += 1\n fnames.append(\n current_app.storage.save_message_submission(\n g.filesystem_id,\n g.source.interaction_count,\n journalist_filename,\n msg))\n if fh:\n g.source.interaction_count += 1\n fnames.append(\n current_app.storage.save_file_submission(\n g.filesystem_id,\n g.source.interaction_count,\n journalist_filename,\n fh.filename,\n fh.stream))\n\n if first_submission:\n msg = render_template('first_submission_flashed_message.html')\n flash(Markup(msg), \"success\")\n\n else:\n if msg and not fh:\n html_contents = gettext('Thanks! We received your message.')\n elif not msg and fh:\n html_contents = gettext('Thanks! We received your document.')\n else:\n html_contents = gettext('Thanks! We received your message and '\n 'document.')\n\n msg = render_template('next_submission_flashed_message.html',\n html_contents=html_contents)\n flash(Markup(msg), \"success\")\n\n new_submissions = []\n for fname in fnames:\n submission = Submission(g.source, fname)\n db.session.add(submission)\n new_submissions.append(submission)\n\n if g.source.pending:\n g.source.pending = False\n\n # Generate a keypair now, if there's enough entropy (issue #303)\n # (gpg reads 300 bytes from /dev/random)\n entropy_avail = get_entropy_estimate()\n if entropy_avail >= 2400:\n db_uri = current_app.config['SQLALCHEMY_DATABASE_URI']\n\n async_genkey(current_app.crypto_util,\n db_uri,\n g.filesystem_id,\n g.codename)\n current_app.logger.info(\"generating key, entropy: {}\".format(\n entropy_avail))\n else:\n current_app.logger.warn(\n \"skipping key generation. 
entropy: {}\".format(\n entropy_avail))\n\n g.source.last_updated = datetime.utcnow()\n db.session.commit()\n\n for sub in new_submissions:\n store.async_add_checksum_for_file(sub)\n\n normalize_timestamps(g.filesystem_id)\n\n return redirect(url_for('main.lookup'))\n\n @view.route('/delete', methods=('POST',))\n @login_required\n def delete():\n \"\"\"This deletes the reply from the source's inbox, but preserves\n the history for journalists such that they can view conversation\n history.\n \"\"\"\n\n query = Reply.query.filter_by(\n filename=request.form['reply_filename'],\n source_id=g.source.id)\n reply = get_one_or_else(query, current_app.logger, abort)\n reply.deleted_by_source = True\n db.session.add(reply)\n db.session.commit()\n\n flash(gettext(\"Reply deleted\"), \"notification\")\n return redirect(url_for('.lookup'))\n\n @view.route('/delete-all', methods=('POST',))\n @login_required\n def batch_delete():\n replies = Reply.query.filter(Reply.source_id == g.source.id) \\\n .filter(Reply.deleted_by_source == False).all() # noqa\n if len(replies) == 0:\n current_app.logger.error(\"Found no replies when at least one was \"\n \"expected\")\n return redirect(url_for('.lookup'))\n\n for reply in replies:\n reply.deleted_by_source = True\n db.session.add(reply)\n db.session.commit()\n\n flash(gettext(\"All replies have been deleted\"), \"notification\")\n return redirect(url_for('.lookup'))\n\n @view.route('/login', methods=('GET', 'POST'))\n def login():\n form = LoginForm()\n if form.validate_on_submit():\n codename = request.form['codename'].strip()\n if valid_codename(codename):\n session.update(codename=codename, logged_in=True)\n return redirect(url_for('.lookup', from_login='1'))\n else:\n current_app.logger.info(\n \"Login failed for invalid codename\")\n flash(gettext(\"Sorry, that is not a recognized codename.\"),\n \"error\")\n return render_template('login.html', form=form)\n\n @view.route('/logout')\n def logout():\n if logged_in():\n msg = render_template('logout_flashed_message.html')\n\n # Clear the session after we render the message so it's localized\n # If a user specified a locale, save it and restore it\n user_locale = g.locale\n session.clear()\n session['locale'] = user_locale\n\n flash(Markup(msg), \"important hide-if-not-tor-browser\")\n return redirect(url_for('.index'))\n\n return view\n",
"path": "securedrop/source_app/main.py"
}
] | diff --git a/securedrop/source_app/main.py b/securedrop/source_app/main.py
index 91481f3157..1ffbaa7ddd 100644
--- a/securedrop/source_app/main.py
+++ b/securedrop/source_app/main.py
@@ -67,6 +67,13 @@ def create():
# Issue 2386: don't log in on duplicates
del session['codename']
+
+ # Issue 4361: Delete 'logged_in' if it's in the session
+ try:
+ del session['logged_in']
+ except KeyError:
+ pass
+
abort(500)
else:
os.mkdir(current_app.storage.path(filesystem_id))
diff --git a/securedrop/tests/test_source.py b/securedrop/tests/test_source.py
index 4499054642..ea9a04a847 100644
--- a/securedrop/tests/test_source.py
+++ b/securedrop/tests/test_source.py
@@ -146,7 +146,31 @@ def test_generate_too_long_codename(source_app):
)
-def test_create_duplicate_codename(source_app):
+def test_create_duplicate_codename_logged_in_not_in_session(source_app):
+ with patch.object(source.app.logger, 'error') as logger:
+ with source_app.test_client() as app:
+ resp = app.get(url_for('main.generate'))
+ assert resp.status_code == 200
+
+ # Create a source the first time
+ resp = app.post(url_for('main.create'), follow_redirects=True)
+ assert resp.status_code == 200
+ codename = session['codename']
+
+ with source_app.test_client() as app:
+ # Attempt to add the same source
+ with app.session_transaction() as sess:
+ sess['codename'] = codename
+ resp = app.post(url_for('main.create'), follow_redirects=True)
+ logger.assert_called_once()
+ assert ("Attempt to create a source with duplicate codename"
+ in logger.call_args[0][0])
+ assert resp.status_code == 500
+ assert 'codename' not in session
+ assert 'logged_in' not in session
+
+
+def test_create_duplicate_codename_logged_in_in_session(source_app):
with patch.object(source.app.logger, 'error') as logger:
with source_app.test_client() as app:
resp = app.get(url_for('main.generate'))
@@ -157,12 +181,17 @@ def test_create_duplicate_codename(source_app):
assert resp.status_code == 200
# Attempt to add the same source
- app.post(url_for('main.create'), follow_redirects=True)
+ resp = app.post(url_for('main.create'), follow_redirects=True)
logger.assert_called_once()
assert ("Attempt to create a source with duplicate codename"
in logger.call_args[0][0])
+ assert resp.status_code == 500
assert 'codename' not in session
+ # Reproducer for bug #4361
+ resp = app.post(url_for('main.index'), follow_redirects=True)
+ assert 'logged_in' not in session
+
def test_lookup(source_app):
"""Test various elements on the /lookup page."""
| Source Interface requests fail with 500 error, due to session issue
## Description
In some situations, requests to the source interface may start to fail, returning the 500 error page. Once the 500 errors start, they continue until the Tor Browser cache is cleared, either explicitly or by starting a new browser session. With source error logging enabled, the following errors are seen on failing requests:
```
[Thu Apr 18 09:46:09.516056 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] [2019-04-18 09:46:09,510] ERROR in app: Exception on / [GET]
[Thu Apr 18 09:46:09.516238 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] Traceback (most recent call last):
[Thu Apr 18 09:46:09.516279 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 2292, in wsgi_app
[Thu Apr 18 09:46:09.516317 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] response = self.full_dispatch_request()
[Thu Apr 18 09:46:09.516363 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1815, in full_dispatch_request
[Thu Apr 18 09:46:09.516442 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] rv = self.handle_user_exception(e)
[Thu Apr 18 09:46:09.516479 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1718, in handle_user_exception
[Thu Apr 18 09:46:09.516514 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] reraise(exc_type, exc_value, tb)
[Thu Apr 18 09:46:09.516549 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 1811, in full_dispatch_request
[Thu Apr 18 09:46:09.516584 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] rv = self.preprocess_request()
[Thu Apr 18 09:46:09.516619 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/usr/local/lib/python2.7/dist-packages/flask/app.py", line 2087, in preprocess_request
[Thu Apr 18 09:46:09.516654 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] rv = func()
[Thu Apr 18 09:46:09.516688 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/var/www/securedrop/source_app/decorators.py", line 23, in decorated_function
[Thu Apr 18 09:46:09.516724 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] return f(*args, **kwargs)
[Thu Apr 18 09:46:09.516758 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/var/www/securedrop/source_app/__init__.py", line 159, in setup_g
[Thu Apr 18 09:46:09.516793 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] g.codename = session['codename']
[Thu Apr 18 09:46:09.516828 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/usr/local/lib/python2.7/dist-packages/werkzeug/local.py", line 377, in <lambda>
[Thu Apr 18 09:46:09.516864 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] __getitem__ = lambda x, i: x._get_current_object()[i]
[Thu Apr 18 09:46:09.516899 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] File "/usr/local/lib/python2.7/dist-packages/flask/sessions.py", line 83, in __getitem__
[Thu Apr 18 09:46:09.516933 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] return super(SecureCookieSession, self).__getitem__(key)
[Thu Apr 18 09:46:09.516968 2019] [wsgi:error] [pid 7324:tid 3457186817792] [remote 127.0.0.1:29169] KeyError: 'codename'
```
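Not part of the original issue: a minimal sketch of the defensive cleanup the eventual fix performs in `create()` when a duplicate codename is detected, so a later request never reaches `setup_g()` with `logged_in` set but `codename` missing. The helper name is hypothetical; `session.pop(key, None)` is equivalent to the `try`/`del`/`except KeyError` pattern used in the actual patch, and the code assumes it runs inside a Flask request context.

```python
from flask import abort, session


def fail_duplicate_codename():
    # Drop any stale login state before aborting with a 500, instead of
    # leaving a half-initialized session behind.
    session.pop('codename', None)
    session.pop('logged_in', None)  # Issue 4361
    abort(500)
```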
## Steps to Reproduce
This error was initially hit by:
1) starting a source interface session on a 0.12.1 Xenial install
2) updating the 0.12.1 Xenial install to 0.12.2~rc1 via cron-apt
3) running a 0.12.1 db restore against the 0.12.2 database
4) attempting to continue the source session.
It's also been reproduced during a test session by creating multiple sources and logging in and out repeatedly (h/t @eloquence), but it is not reliably reproducible.
## Expected Behavior
Source Interface requests for valid URLs return the correct results.
## Actual Behavior
SI requests all return 500 errors.
## Comments
|
microsoft__ptvsd-926 | [
{
"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport argparse\nimport os.path\nimport sys\n\nfrom ptvsd._attach import attach_main\nfrom ptvsd._local import debug_main, run_main\nfrom ptvsd.socket import Address\nfrom ptvsd.version import __version__, __author__ # noqa\n\n\n##################################\n# the script\n\n\"\"\"\nFor the PyDevd CLI handling see:\n\n https://github.com/fabioz/PyDev.Debugger/blob/master/_pydevd_bundle/pydevd_command_line_handling.py\n https://github.com/fabioz/PyDev.Debugger/blob/master/pydevd.py#L1450 (main func)\n\"\"\" # noqa\n\nPYDEVD_OPTS = {\n '--file',\n '--vm_type',\n}\n\nPYDEVD_FLAGS = {\n '--DEBUG',\n '--DEBUG_RECORD_SOCKET_READS',\n '--cmd-line',\n '--module',\n '--multiproc',\n '--multiprocess',\n '--print-in-debugger-startup',\n '--save-signatures',\n '--save-threading',\n '--save-asyncio',\n '--server',\n '--qt-support=auto',\n}\n\nUSAGE = \"\"\"\n {0} [-h] [-V] [--nodebug] [--client] [--host HOST] --port PORT -m MODULE [arg ...]\n {0} [-h] [-V] [--nodebug] [--client] [--host HOST] --port PORT FILENAME [arg ...]\n {0} [-h] [-V] --host HOST --port PORT --pid PROCESS_ID\n\"\"\" # noqa\n\n\ndef parse_args(argv=None):\n \"\"\"Return the parsed args to use in main().\"\"\"\n if argv is None:\n argv = sys.argv\n prog = argv[0]\n if prog == __file__:\n prog = '{} -m ptvsd'.format(os.path.basename(sys.executable))\n else:\n prog = argv[0]\n argv = argv[1:]\n\n supported, pydevd, script = _group_args(argv)\n args = _parse_args(prog, supported)\n # '--' is used in _run_args to extract pydevd specific args\n extra = pydevd + ['--']\n if script:\n extra += script\n return args, extra\n\n\ndef _group_args(argv):\n supported = []\n pydevd = []\n script = []\n\n try:\n pos = argv.index('--')\n except ValueError:\n script = []\n else:\n script = argv[pos + 1:]\n argv = argv[:pos]\n\n for arg in argv:\n if arg == '-h' or arg == '--help':\n return argv, [], script\n\n gottarget = False\n skip = 0\n for i in range(len(argv)):\n if skip:\n skip -= 1\n continue\n\n arg = argv[i]\n try:\n nextarg = argv[i + 1]\n except IndexError:\n nextarg = None\n\n # TODO: Deprecate the PyDevd arg support.\n # PyDevd support\n if gottarget:\n script = argv[i:] + script\n break\n if arg == '--file':\n if nextarg is None: # The filename is missing...\n pydevd.append(arg)\n continue # This will get handled later.\n if nextarg.endswith(':') and '--module' in pydevd:\n pydevd.remove('--module')\n arg = '-m'\n argv[i + 1] = nextarg = nextarg[:-1]\n else:\n arg = nextarg\n skip += 1\n\n if arg in PYDEVD_OPTS:\n pydevd.append(arg)\n if nextarg is not None:\n pydevd.append(nextarg)\n skip += 1\n elif arg in PYDEVD_FLAGS:\n pydevd.append(arg)\n elif arg == '--nodebug':\n supported.append(arg)\n\n # ptvsd support\n elif arg in ('--host', '--port', '--pid', '-m'):\n if arg == '-m' or arg == '--pid':\n gottarget = True\n supported.append(arg)\n if nextarg is not None:\n supported.append(nextarg)\n skip += 1\n elif arg in ('--single-session', '--wait', '--client'):\n supported.append(arg)\n elif not arg.startswith('-'):\n supported.append(arg)\n gottarget = True\n\n # unsupported arg\n else:\n supported.append(arg)\n break\n\n return supported, pydevd, script\n\n\ndef _parse_args(prog, argv):\n parser = argparse.ArgumentParser(\n prog=prog,\n usage=USAGE.format(prog),\n )\n\n parser.add_argument('--nodebug', action='store_true')\n parser.add_argument('--client', 
action='store_true')\n\n parser.add_argument('--host')\n parser.add_argument('--port', type=int, required=True)\n\n target = parser.add_mutually_exclusive_group(required=True)\n target.add_argument('-m', dest='module')\n target.add_argument('--pid', type=int)\n target.add_argument('filename', nargs='?')\n\n parser.add_argument('--single-session', action='store_true')\n parser.add_argument('--wait', action='store_true')\n\n parser.add_argument('-V', '--version', action='version')\n parser.version = __version__\n\n args = parser.parse_args(argv)\n ns = vars(args)\n\n host = ns.pop('host', None)\n port = ns.pop('port')\n client = ns.pop('client')\n args.address = (Address.as_client if client else Address.as_server)(host, port) # noqa\n\n pid = ns.pop('pid')\n module = ns.pop('module')\n filename = ns.pop('filename')\n if pid is not None:\n args.name = pid\n args.kind = 'pid'\n elif module is not None:\n args.name = module\n args.kind = 'module'\n else:\n args.name = filename\n args.kind = 'script'\n\n return args\n\n\ndef handle_args(addr, name, kind, extra=(), nodebug=False, **kwargs):\n if kind == 'pid':\n attach_main(addr, name, *extra, **kwargs)\n elif nodebug:\n run_main(addr, name, kind, *extra, **kwargs)\n else:\n debug_main(addr, name, kind, *extra, **kwargs)\n\n\ndef main(argv=None):\n args, extra = parse_args(argv)\n handle_args(args.address, args.name, args.kind, extra,\n nodebug=args.nodebug, singlesession=args.single_session,\n wait=args.wait)\n\n\nif __name__ == '__main__':\n main()\n",
"path": "ptvsd/__main__.py"
}
] | [
{
"content": "# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See LICENSE in the project root\n# for license information.\n\nimport argparse\nimport os.path\nimport sys\n\nfrom ptvsd._attach import attach_main\nfrom ptvsd._local import debug_main, run_main\nfrom ptvsd.socket import Address\nfrom ptvsd.version import __version__, __author__ # noqa\n\n\n##################################\n# the script\n\n\"\"\"\nFor the PyDevd CLI handling see:\n\n https://github.com/fabioz/PyDev.Debugger/blob/master/_pydevd_bundle/pydevd_command_line_handling.py\n https://github.com/fabioz/PyDev.Debugger/blob/master/pydevd.py#L1450 (main func)\n\"\"\" # noqa\n\nPYDEVD_OPTS = {\n '--file',\n '--vm_type',\n}\n\nPYDEVD_FLAGS = {\n '--DEBUG',\n '--DEBUG_RECORD_SOCKET_READS',\n '--cmd-line',\n '--module',\n '--multiproc',\n '--multiprocess',\n '--print-in-debugger-startup',\n '--save-signatures',\n '--save-threading',\n '--save-asyncio',\n '--server',\n '--qt-support=auto',\n}\n\nUSAGE = \"\"\"\n {0} [-h] [-V] [--nodebug] [--client] [--host HOST] --port PORT -m MODULE [arg ...]\n {0} [-h] [-V] [--nodebug] [--client] [--host HOST] --port PORT FILENAME [arg ...]\n {0} [-h] [-V] --host HOST --port PORT --pid PROCESS_ID\n\"\"\" # noqa\n\n\ndef parse_args(argv=None):\n \"\"\"Return the parsed args to use in main().\"\"\"\n if argv is None:\n argv = sys.argv\n prog = argv[0]\n if prog == __file__:\n prog = '{} -m ptvsd'.format(os.path.basename(sys.executable))\n else:\n prog = argv[0]\n argv = argv[1:]\n\n supported, pydevd, script = _group_args(argv)\n args = _parse_args(prog, supported)\n # '--' is used in _run_args to extract pydevd specific args\n extra = pydevd + ['--']\n if script:\n extra += script\n return args, extra\n\n\ndef _group_args(argv):\n supported = []\n pydevd = []\n script = []\n\n try:\n pos = argv.index('--')\n except ValueError:\n script = []\n else:\n script = argv[pos + 1:]\n argv = argv[:pos]\n\n for arg in argv:\n if arg == '-h' or arg == '--help':\n return argv, [], script\n\n gottarget = False\n skip = 0\n for i in range(len(argv)):\n if skip:\n skip -= 1\n continue\n\n arg = argv[i]\n try:\n nextarg = argv[i + 1]\n except IndexError:\n nextarg = None\n\n # TODO: Deprecate the PyDevd arg support.\n # PyDevd support\n if gottarget:\n script = argv[i:] + script\n break\n if arg == '--file':\n if nextarg is None: # The filename is missing...\n pydevd.append(arg)\n continue # This will get handled later.\n if nextarg.endswith(':') and '--module' in pydevd:\n pydevd.remove('--module')\n arg = '-m'\n argv[i + 1] = nextarg = nextarg[:-1]\n else:\n arg = nextarg\n skip += 1\n\n if arg in PYDEVD_OPTS:\n pydevd.append(arg)\n if nextarg is not None:\n pydevd.append(nextarg)\n skip += 1\n elif arg in PYDEVD_FLAGS:\n pydevd.append(arg)\n elif arg == '--nodebug':\n supported.append(arg)\n\n # ptvsd support\n elif arg in ('--host', '--port', '--pid', '-m'):\n if arg == '-m' or arg == '--pid':\n gottarget = True\n supported.append(arg)\n if nextarg is not None:\n supported.append(nextarg)\n skip += 1\n elif arg in ('--single-session', '--wait', '--client'):\n supported.append(arg)\n elif not arg.startswith('-'):\n supported.append(arg)\n gottarget = True\n\n # unsupported arg\n else:\n supported.append(arg)\n break\n\n return supported, pydevd, script\n\n\ndef _parse_args(prog, argv):\n parser = argparse.ArgumentParser(\n prog=prog,\n usage=USAGE.format(prog),\n )\n\n parser.add_argument('--nodebug', action='store_true')\n parser.add_argument('--client', 
action='store_true')\n\n parser.add_argument('--host', required=True)\n parser.add_argument('--port', type=int, required=True)\n\n target = parser.add_mutually_exclusive_group(required=True)\n target.add_argument('-m', dest='module')\n target.add_argument('--pid', type=int)\n target.add_argument('filename', nargs='?')\n\n parser.add_argument('--single-session', action='store_true')\n parser.add_argument('--wait', action='store_true')\n\n parser.add_argument('-V', '--version', action='version')\n parser.version = __version__\n\n args = parser.parse_args(argv)\n ns = vars(args)\n\n host = ns.pop('host', None)\n port = ns.pop('port')\n client = ns.pop('client')\n args.address = (Address.as_client if client else Address.as_server)(host, port) # noqa\n\n pid = ns.pop('pid')\n module = ns.pop('module')\n filename = ns.pop('filename')\n if pid is not None:\n args.name = pid\n args.kind = 'pid'\n elif module is not None:\n args.name = module\n args.kind = 'module'\n else:\n args.name = filename\n args.kind = 'script'\n\n return args\n\n\ndef handle_args(addr, name, kind, extra=(), nodebug=False, **kwargs):\n if kind == 'pid':\n attach_main(addr, name, *extra, **kwargs)\n elif nodebug:\n run_main(addr, name, kind, *extra, **kwargs)\n else:\n debug_main(addr, name, kind, *extra, **kwargs)\n\n\ndef main(argv=None):\n args, extra = parse_args(argv)\n handle_args(args.address, args.name, args.kind, extra,\n nodebug=args.nodebug, singlesession=args.single_session,\n wait=args.wait)\n\n\nif __name__ == '__main__':\n main()\n",
"path": "ptvsd/__main__.py"
}
] | diff --git a/ptvsd/__main__.py b/ptvsd/__main__.py
index 258942784..f6dc445a9 100644
--- a/ptvsd/__main__.py
+++ b/ptvsd/__main__.py
@@ -157,7 +157,7 @@ def _parse_args(prog, argv):
parser.add_argument('--nodebug', action='store_true')
parser.add_argument('--client', action='store_true')
- parser.add_argument('--host')
+ parser.add_argument('--host', required=True)
parser.add_argument('--port', type=int, required=True)
target = parser.add_mutually_exclusive_group(required=True)
diff --git a/tests/ptvsd/test___main__.py b/tests/ptvsd/test___main__.py
index ec4c72e76..51a8aa3a9 100644
--- a/tests/ptvsd/test___main__.py
+++ b/tests/ptvsd/test___main__.py
@@ -9,22 +9,13 @@ class ParseArgsTests(unittest.TestCase):
EXPECTED_EXTRA = ['--']
- def test_module(self):
- args, extra = parse_args([
- 'eggs',
- '--port', '8888',
- '-m', 'spam',
- ])
-
- self.assertEqual(vars(args), {
- 'kind': 'module',
- 'name': 'spam',
- 'address': Address.as_server(None, 8888),
- 'nodebug': False,
- 'single_session': False,
- 'wait': False,
- })
- self.assertEqual(extra, self.EXPECTED_EXTRA)
+ def test_host_required(self):
+ with self.assertRaises(SystemExit):
+ parse_args([
+ 'eggs',
+ '--port', '8888',
+ '-m', 'spam',
+ ])
def test_module_server(self):
args, extra = parse_args([
@@ -49,6 +40,7 @@ def test_module_nodebug(self):
'eggs',
'--nodebug',
'--client',
+ '--host', 'localhost',
'--port', '8888',
'-m', 'spam',
])
@@ -56,7 +48,7 @@ def test_module_nodebug(self):
self.assertEqual(vars(args), {
'kind': 'module',
'name': 'spam',
- 'address': Address.as_client(None, 8888),
+ 'address': Address.as_client('localhost', 8888),
'nodebug': True,
'single_session': False,
'wait': False,
@@ -66,6 +58,7 @@ def test_module_nodebug(self):
def test_script(self):
args, extra = parse_args([
'eggs',
+ '--host', 'localhost',
'--port', '8888',
'spam.py',
])
@@ -73,7 +66,7 @@ def test_script(self):
self.assertEqual(vars(args), {
'kind': 'script',
'name': 'spam.py',
- 'address': Address.as_server(None, 8888),
+ 'address': Address.as_server('localhost', 8888),
'nodebug': False,
'single_session': False,
'wait': False,
@@ -103,6 +96,7 @@ def test_script_nodebug(self):
'eggs',
'--nodebug',
'--client',
+ '--host', 'localhost',
'--port', '8888',
'spam.py',
])
@@ -110,7 +104,7 @@ def test_script_nodebug(self):
self.assertEqual(vars(args), {
'kind': 'script',
'name': 'spam.py',
- 'address': Address.as_client(None, 8888),
+ 'address': Address.as_client('localhost', 8888),
'nodebug': True,
'single_session': False,
'wait': False,
@@ -179,6 +173,7 @@ def test_remote_single_session(self):
args, extra = parse_args([
'eggs',
'--single-session',
+ '--host', 'localhost',
'--port', '8888',
'spam.py',
])
@@ -236,6 +231,7 @@ def test_extra(self):
args, extra = parse_args([
'eggs',
'--DEBUG',
+ '--host', 'localhost',
'--port', '8888',
'--vm_type', '???',
'spam.py',
@@ -251,7 +247,7 @@ def test_extra(self):
self.assertEqual(vars(args), {
'kind': 'script',
'name': 'spam.py',
- 'address': Address.as_server(None, 8888),
+ 'address': Address.as_server('localhost', 8888),
'nodebug': False,
'single_session': False,
'wait': False,
@@ -274,6 +270,7 @@ def test_extra_nodebug(self):
'--DEBUG',
'--nodebug',
'--client',
+ '--host', 'localhost',
'--port', '8888',
'--vm_type', '???',
'spam.py',
@@ -289,7 +286,7 @@ def test_extra_nodebug(self):
self.assertEqual(vars(args), {
'kind': 'script',
'name': 'spam.py',
- 'address': Address.as_client(None, 8888),
+ 'address': Address.as_client('localhost', 8888),
'nodebug': True,
'single_session': False,
'wait': False,
@@ -337,6 +334,7 @@ def test_unsupported_arg(self):
def test_pseudo_backward_compatibility(self):
args, extra = parse_args([
'eggs',
+ '--host', 'localhost',
'--port', '8888',
'--module',
'--file', 'spam',
@@ -345,7 +343,7 @@ def test_pseudo_backward_compatibility(self):
self.assertEqual(vars(args), {
'kind': 'script',
'name': 'spam',
- 'address': Address.as_server(None, 8888),
+ 'address': Address.as_server('localhost', 8888),
'nodebug': False,
'single_session': False,
'wait': False,
@@ -357,6 +355,7 @@ def test_pseudo_backward_compatibility_nodebug(self):
'eggs',
'--nodebug',
'--client',
+ '--host', 'localhost',
'--port', '8888',
'--module',
'--file', 'spam',
@@ -365,7 +364,7 @@ def test_pseudo_backward_compatibility_nodebug(self):
self.assertEqual(vars(args), {
'kind': 'script',
'name': 'spam',
- 'address': Address.as_client(None, 8888),
+ 'address': Address.as_client('localhost', 8888),
'nodebug': True,
'single_session': False,
'wait': False,
diff --git a/tests/system_tests/test_connection.py b/tests/system_tests/test_connection.py
index 730964943..b65a31ada 100644
--- a/tests/system_tests/test_connection.py
+++ b/tests/system_tests/test_connection.py
@@ -115,6 +115,7 @@ def connect(addr, wait=None, closeonly=False):
proc = Proc.start_python_module('ptvsd', [
'--server',
'--wait',
+ '--host', 'localhost',
'--port', '5678',
'--file', filename,
], env={
| Make --host a required switch
`--host` is currently optional, and defaults to `localhost`. The old behavior was to default to `0.0.0.0`, which is not a particularly sane default. However, the new default makes things confusing, since it is applied silently: things just work differently. Changing the switch to be explicit solves that problem, while also forcing the user to consider the security implications of either choice.
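A standalone illustration of the change (not taken from the PR itself): `argparse` options are optional by default, so `--host` silently fell back to a default address; passing `required=True` makes the caller choose explicitly. The program name below is hypothetical.

```python
import argparse

parser = argparse.ArgumentParser(prog='ptvsd-example')
parser.add_argument('--host', required=True)
parser.add_argument('--port', type=int, required=True)

# Explicit host: parses fine.
args = parser.parse_args(['--host', 'localhost', '--port', '8888'])
print(args.host, args.port)  # localhost 8888

# Omitting --host now exits with:
#   error: the following arguments are required: --host
```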
|
ResonantGeoData__ResonantGeoData-470 | [
{
"content": "from base64 import b64encode\nfrom dataclasses import dataclass\nimport getpass\nfrom pathlib import Path\nimport tempfile\nfrom typing import Dict, Iterator, List, Optional, Tuple, Union\n\nfrom tqdm import tqdm\n\nfrom .session import RgdcSession\nfrom .types import DATETIME_OR_STR_TUPLE, SEARCH_PREDICATE_CHOICE\nfrom .utils import (\n DEFAULT_RGD_API,\n download_checksum_file_to_path,\n limit_offset_pager,\n spatial_search_params,\n spatial_subentry_id,\n)\n\n\n@dataclass\nclass RasterDownload:\n path: Path\n images: List[Path]\n ancillary: List[Path]\n\n\nclass Rgdc:\n def __init__(\n self,\n api_url: str = DEFAULT_RGD_API,\n username: Optional[str] = None,\n password: Optional[str] = None,\n ):\n \"\"\"\n Initialize a RGD Client.\n\n Args:\n api_url: The base url of the RGD API instance.\n username: The username to authenticate to the instance with, if any.\n password: The password associated with the provided username. If None, a prompt will be provided.\n\n Returns:\n A new Rgdc instance.\n \"\"\"\n auth_header = None\n\n # Prompt for password if not provided\n if username is not None and password is None:\n password = getpass.getpass()\n\n if username and password:\n encoded_credentials = b64encode(f'{username}:{password}'.encode('utf-8')).decode()\n auth_header = f'Basic {encoded_credentials}'\n\n self.session = RgdcSession(base_url=api_url, auth_header=auth_header)\n\n def list_image_tiles(self, image_id: Union[str, int]) -> Dict:\n \"\"\"List geodata imagery tiles.\"\"\"\n r = self.session.get(f'image_process/imagery/{image_id}/tiles')\n return r.json()\n\n def download_image_file(\n self, image_id: Union[str, int], chunk_size: int = 1024 * 1024\n ) -> Iterator[bytes]:\n \"\"\"\n Download the associated ImageFile data for this ImageEntry directly from S3.\n\n Args:\n image_id: The ID of the ImageEntry to download.\n chunk_size: The size (in bytes) of each item in the returned iterator (defaults to 1MB).\n\n Returns:\n An iterator of byte chunks.\n \"\"\"\n r = self.session.get(f'rgd_imagery/{image_id}/data', stream=True)\n return r.iter_content(chunk_size=chunk_size)\n\n def download_image_thumbnail(\n self,\n image_id: Union[str, int],\n ) -> bytes:\n \"\"\"\n Download the generated thumbnail for this ImageEntry.\n\n Args:\n image_id: The ID of the ImageEntry to download.\n\n Returns:\n Thumbnail bytes.\n \"\"\"\n r = self.session.get(f'image_process/imagery/{image_id}/thumbnail')\n return r.content\n\n def download_raster_thumbnail(\n self,\n raster_meta_id: Union[str, int, dict],\n band: int = 0,\n ) -> bytes:\n \"\"\"\n Download the generated thumbnail for this ImageEntry.\n\n Args:\n raster_meta_id: The id of the RasterMeta, which is a child to the desired raster entry, or search result.\n band: The index of the image in the raster's image set to produce thumbnail from.\n\n Returns:\n Thumbnail bytes.\n \"\"\"\n if isinstance(raster_meta_id, dict):\n raster_meta_id = spatial_subentry_id(raster_meta_id)\n\n r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}')\n parent_raster = r.json().get('parent_raster', {})\n images = parent_raster.get('image_set', {}).get('images', [])\n try:\n return self.download_image_thumbnail(images[band]['id'])\n except IndexError:\n raise IndexError(f'Band index ({band}) out of range.')\n\n def get_raster(self, raster_meta_id: Union[str, int, dict], stac: bool = False) -> Dict:\n \"\"\"Get raster entry detail.\n\n Args:\n stac: Optionally return as STAC Item dictionary/JSON.\n\n Returns:\n Serialized object representation.\n 
\"\"\"\n if isinstance(raster_meta_id, dict):\n raster_meta_id = spatial_subentry_id(raster_meta_id)\n\n if stac:\n r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}/stac')\n else:\n r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}')\n return r.json()\n\n def download_raster(\n self,\n raster_meta_id: Union[str, int, dict],\n pathname: Optional[str] = None,\n nest_with_name: bool = False,\n keep_existing: bool = True,\n ) -> RasterDownload:\n \"\"\"\n Download the image set associated with a raster entry to disk.\n\n Args:\n raster_meta_id: The id of the RasterMeta, which is a child to the desired raster entry, or search result.\n pathname: The directory to download the image set to. If not supplied, a temporary directory will be used.\n nest_with_name: If True, nests the download within an additional directory, using the raster entry name.\n keep_existing: If False, replace files existing on disk. Only valid if `pathname` is given.\n\n Returns:\n A dictionary of the paths to all files downloaded under the directory.\n \"\"\"\n if isinstance(raster_meta_id, dict):\n raster_meta_id = spatial_subentry_id(raster_meta_id)\n\n r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}')\n parent_raster = r.json().get('parent_raster', {})\n\n # Create dirs after request to avoid empty dirs if failed\n if pathname is None:\n pathname = tempfile.mkdtemp()\n\n # Handle optional nesting with raster entry name\n path = Path(pathname)\n parent_raster_name: Optional[str] = parent_raster.get('name')\n\n if nest_with_name and parent_raster_name:\n path = path / parent_raster_name\n\n # Ensure base download directory exists\n if not path.exists():\n path.mkdir()\n\n # Initialize dataclass\n raster_download = RasterDownload(path, [], [])\n\n # Download images\n images = parent_raster.get('image_set', {}).get('images', [])\n for image in tqdm(images, desc='Downloading image files'):\n file = image.get('file', {})\n file_path = download_checksum_file_to_path(file, path, keep_existing=keep_existing)\n if file_path:\n raster_download.images.append(file_path)\n\n # Download ancillary files\n ancillary = parent_raster.get('ancillary_files', [])\n for file in tqdm(ancillary, desc='Downloading ancillary files'):\n file_path = download_checksum_file_to_path(file, path, keep_existing=keep_existing)\n if file_path:\n raster_download.ancillary.append(file_path)\n\n return raster_download\n\n def search(\n self,\n query: Optional[Union[Dict, str]] = None,\n predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,\n relates: Optional[str] = None,\n distance: Optional[Tuple[float, float]] = None,\n acquired: Optional[DATETIME_OR_STR_TUPLE] = None,\n instrumentation: Optional[str] = None,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n ) -> List[Dict]:\n \"\"\"\n Search for geospatial entries based on various criteria.\n\n For Ranges (Tuples), an entry of `None` means that side of the range is unbounded.\n E.g. a range of (2, None) is 2 or more, (None, 5) is at most 5, (2, 5) is between 2 and 5.\n\n Args:\n query: Either a WKT GeoJSON representation, a GeoJSON string, or a GeoJSON dict.\n predicate: A named spatial predicate based on the DE-9IM. 
This spatial predicate will\n be used to filter data such that predicate(a, b) where b is the queried geometry.\n relates: Specify exactly how the queried geometry should relate to the data using a\n DE-9IM string code.\n distance: The min/max distance around the queried geometry in meters.\n acquired: The min/max date and time (ISO 8601) when data was acquired.\n instrumentation: The instrumentation used to acquire at least one of these data.\n limit: The maximum number of results to return.\n offset: The number of results to skip.\n\n Returns:\n A list of Spatial Entries.\n \"\"\"\n params = spatial_search_params(\n query=query,\n predicate=predicate,\n relates=relates,\n distance=distance,\n acquired=acquired,\n instrumentation=instrumentation,\n limit=limit,\n offset=offset,\n )\n return list(limit_offset_pager(self.session, 'rgd/search', params=params))\n\n def create_raster_stac(self, raster: Dict) -> Dict:\n \"\"\"Create a raster entry using STAC format.\"\"\"\n r = self.session.post('rgd_imagery/raster/stac', json=raster)\n r.raise_for_status()\n\n return r.json()\n\n def search_raster_stac(\n self,\n query: Optional[Union[Dict, str]] = None,\n predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,\n relates: Optional[str] = None,\n distance: Optional[Tuple[float, float]] = None,\n acquired: Optional[DATETIME_OR_STR_TUPLE] = None,\n instrumentation: Optional[str] = None,\n num_bands: Optional[Tuple[int, int]] = None,\n resolution: Optional[Tuple[int, int]] = None,\n cloud_cover: Optional[Tuple[float, float]] = None,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n ) -> List[Dict]:\n \"\"\"\n Search for raster entries based on various criteria.\n\n For Ranges (Tuples), an entry of `None` means that side of the range is unbounded.\n E.g. a range of (2, None) is 2 or more, (None, 5) is at most 5, (2, 5) is between 2 and 5.\n\n Args:\n query: Either a WKT GeoJSON representation, a GeoJSON string, or a GeoJSON dict.\n predicate: A named spatial predicate based on the DE-9IM. This spatial predicate will\n be used to filter data such that predicate(a, b) where b is the queried geometry.\n relates: Specify exactly how the queried geometry should relate to the data using a\n DE-9IM string code.\n distance: The min/max distance around the queried geometry in meters.\n acquired: The min/max date and time (ISO 8601) when data was acquired.\n instrumentation: The instrumentation used to acquire at least one of these data.\n num_bands: The min/max number of bands in the raster.\n resolution: The min/max resolution of the raster.\n cloud_cover: The min/max cloud coverage of the raster.\n limit: The maximum number of results to return.\n offset: The number of results to skip.\n\n Returns:\n A list of Spatial Entries in STAC Item format.\n \"\"\"\n params = spatial_search_params(\n query=query,\n predicate=predicate,\n relates=relates,\n distance=distance,\n acquired=acquired,\n instrumentation=instrumentation,\n limit=limit,\n offset=offset,\n )\n\n if num_bands and len(num_bands) == 2:\n nbmin, nbmax = num_bands\n params['num_bands_min'] = nbmin\n params['num_bands_max'] = nbmax\n\n if resolution and len(resolution) == 2:\n rmin, rmax = resolution\n params['resolution_min'] = rmin\n params['resolution_max'] = rmax\n\n if cloud_cover and len(cloud_cover) == 2:\n ccmin, ccmax = cloud_cover\n params['cloud_cover_min'] = ccmin\n params['cloud_cover_max'] = ccmax\n\n return list(limit_offset_pager(self.session, 'rgd_imagery/raster/search', params=params))\n",
"path": "rgd-client/rgd_client/rgdc.py"
}
] | [
{
"content": "from base64 import b64encode\nfrom dataclasses import dataclass\nimport getpass\nfrom pathlib import Path\nimport tempfile\nfrom typing import Dict, Iterator, List, Optional, Tuple, Union\n\nfrom tqdm import tqdm\n\nfrom .session import RgdcSession\nfrom .types import DATETIME_OR_STR_TUPLE, SEARCH_PREDICATE_CHOICE\nfrom .utils import (\n DEFAULT_RGD_API,\n download_checksum_file_to_path,\n limit_offset_pager,\n spatial_search_params,\n spatial_subentry_id,\n)\n\n\n@dataclass\nclass RasterDownload:\n path: Path\n images: List[Path]\n ancillary: List[Path]\n\n\nclass Rgdc:\n def __init__(\n self,\n api_url: str = DEFAULT_RGD_API,\n username: Optional[str] = None,\n password: Optional[str] = None,\n ):\n \"\"\"\n Initialize a RGD Client.\n\n Args:\n api_url: The base url of the RGD API instance.\n username: The username to authenticate to the instance with, if any.\n password: The password associated with the provided username. If None, a prompt will be provided.\n\n Returns:\n A new Rgdc instance.\n \"\"\"\n auth_header = None\n\n # Prompt for password if not provided\n if username is not None and password is None:\n password = getpass.getpass()\n\n if username and password:\n encoded_credentials = b64encode(f'{username}:{password}'.encode('utf-8')).decode()\n auth_header = f'Basic {encoded_credentials}'\n\n self.session = RgdcSession(base_url=api_url, auth_header=auth_header)\n\n def list_image_tiles(self, image_id: Union[str, int]) -> Dict:\n \"\"\"List geodata imagery tiles.\"\"\"\n r = self.session.get(f'image_process/imagery/{image_id}/tiles')\n return r.json()\n\n def download_image_file(\n self, image_id: Union[str, int], chunk_size: int = 1024 * 1024\n ) -> Iterator[bytes]:\n \"\"\"\n Download the associated ImageFile data for this ImageEntry directly from S3.\n\n Args:\n image_id: The ID of the ImageEntry to download.\n chunk_size: The size (in bytes) of each item in the returned iterator (defaults to 1MB).\n\n Returns:\n An iterator of byte chunks.\n \"\"\"\n r = self.session.get(f'rgd_imagery/{image_id}/data', stream=True)\n return r.iter_content(chunk_size=chunk_size)\n\n def download_image_thumbnail(\n self,\n image_id: Union[str, int],\n ) -> bytes:\n \"\"\"\n Download the generated thumbnail for this ImageEntry.\n\n Args:\n image_id: The ID of the ImageEntry to download.\n\n Returns:\n Thumbnail bytes.\n \"\"\"\n r = self.session.get(f'image_process/imagery/{image_id}/thumbnail')\n return r.content\n\n def download_raster_thumbnail(\n self,\n raster_meta_id: Union[str, int, dict],\n band: int = 0,\n ) -> bytes:\n \"\"\"\n Download the generated thumbnail for this ImageEntry.\n\n Args:\n raster_meta_id: The id of the RasterMeta, which is a child to the desired raster entry, or search result.\n band: The index of the image in the raster's image set to produce thumbnail from.\n\n Returns:\n Thumbnail bytes.\n \"\"\"\n if isinstance(raster_meta_id, dict):\n raster_meta_id = spatial_subentry_id(raster_meta_id)\n\n r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}')\n parent_raster = r.json().get('parent_raster', {})\n images = parent_raster.get('image_set', {}).get('images', [])\n try:\n return self.download_image_thumbnail(images[band]['id'])\n except IndexError:\n raise IndexError(f'Band index ({band}) out of range.')\n\n def get_raster(self, raster_meta_id: Union[str, int, dict], stac: bool = False) -> Dict:\n \"\"\"Get raster entry detail.\n\n Args:\n stac: Optionally return as STAC Item dictionary/JSON.\n\n Returns:\n Serialized object representation.\n 
\"\"\"\n if isinstance(raster_meta_id, dict):\n raster_meta_id = spatial_subentry_id(raster_meta_id)\n\n if stac:\n r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}/stac')\n else:\n r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}')\n return r.json()\n\n def download_raster(\n self,\n raster_meta_id: Union[str, int, dict],\n pathname: Optional[str] = None,\n nest_with_name: bool = False,\n keep_existing: bool = True,\n ) -> RasterDownload:\n \"\"\"\n Download the image set associated with a raster entry to disk.\n\n Args:\n raster_meta_id: The id of the RasterMeta, which is a child to the desired raster entry, or search result.\n pathname: The directory to download the image set to. If not supplied, a temporary directory will be used.\n nest_with_name: If True, nests the download within an additional directory, using the raster entry name.\n keep_existing: If False, replace files existing on disk. Only valid if `pathname` is given.\n\n Returns:\n A dictionary of the paths to all files downloaded under the directory.\n \"\"\"\n if isinstance(raster_meta_id, dict):\n raster_meta_id = spatial_subentry_id(raster_meta_id)\n\n r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}')\n parent_raster = r.json().get('parent_raster', {})\n\n # Create dirs after request to avoid empty dirs if failed\n if pathname is None:\n pathname = tempfile.mkdtemp()\n\n # Handle optional nesting with raster entry name\n path = Path(pathname)\n parent_raster_name: Optional[str] = parent_raster.get('name')\n\n if nest_with_name and parent_raster_name:\n path = path / parent_raster_name\n\n # Ensure base download directory exists\n if not path.exists():\n path.mkdir()\n\n # Initialize dataclass\n raster_download = RasterDownload(path, [], [])\n\n # Download images\n images = parent_raster.get('image_set', {}).get('images', [])\n for image in tqdm(images, desc='Downloading image files'):\n file = image.get('file', {})\n file_path = download_checksum_file_to_path(file, path, keep_existing=keep_existing)\n if file_path:\n raster_download.images.append(file_path)\n\n # Download ancillary files\n ancillary = parent_raster.get('ancillary_files', [])\n for file in tqdm(ancillary, desc='Downloading ancillary files'):\n file_path = download_checksum_file_to_path(file, path, keep_existing=keep_existing)\n if file_path:\n raster_download.ancillary.append(file_path)\n\n return raster_download\n\n def search(\n self,\n query: Optional[Union[Dict, str]] = None,\n predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,\n relates: Optional[str] = None,\n distance: Optional[Tuple[float, float]] = None,\n acquired: Optional[DATETIME_OR_STR_TUPLE] = None,\n instrumentation: Optional[str] = None,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n ) -> List[Dict]:\n \"\"\"\n Search for geospatial entries based on various criteria.\n\n For Ranges (Tuples), an entry of `None` means that side of the range is unbounded.\n E.g. a range of (2, None) is 2 or more, (None, 5) is at most 5, (2, 5) is between 2 and 5.\n\n Args:\n query: Either a WKT GeoJSON representation, a GeoJSON string, or a GeoJSON dict.\n predicate: A named spatial predicate based on the DE-9IM. 
This spatial predicate will\n be used to filter data such that predicate(a, b) where b is the queried geometry.\n relates: Specify exactly how the queried geometry should relate to the data using a\n DE-9IM string code.\n distance: The min/max distance around the queried geometry in meters.\n acquired: The min/max date and time (ISO 8601) when data was acquired.\n instrumentation: The instrumentation used to acquire at least one of these data.\n limit: The maximum number of results to return.\n offset: The number of results to skip.\n\n Returns:\n A list of Spatial Entries.\n \"\"\"\n params = spatial_search_params(\n query=query,\n predicate=predicate,\n relates=relates,\n distance=distance,\n acquired=acquired,\n instrumentation=instrumentation,\n limit=limit,\n offset=offset,\n )\n\n r = self.session.get('rgd/search', params=params)\n r.raise_for_status()\n\n return r.json()\n\n def create_raster_stac(self, raster: Dict) -> Dict:\n \"\"\"Create a raster entry using STAC format.\"\"\"\n r = self.session.post('rgd_imagery/raster/stac', json=raster)\n r.raise_for_status()\n\n return r.json()\n\n def search_raster_stac(\n self,\n query: Optional[Union[Dict, str]] = None,\n predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,\n relates: Optional[str] = None,\n distance: Optional[Tuple[float, float]] = None,\n acquired: Optional[DATETIME_OR_STR_TUPLE] = None,\n instrumentation: Optional[str] = None,\n num_bands: Optional[Tuple[int, int]] = None,\n resolution: Optional[Tuple[int, int]] = None,\n cloud_cover: Optional[Tuple[float, float]] = None,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n ) -> List[Dict]:\n \"\"\"\n Search for raster entries based on various criteria.\n\n For Ranges (Tuples), an entry of `None` means that side of the range is unbounded.\n E.g. a range of (2, None) is 2 or more, (None, 5) is at most 5, (2, 5) is between 2 and 5.\n\n Args:\n query: Either a WKT GeoJSON representation, a GeoJSON string, or a GeoJSON dict.\n predicate: A named spatial predicate based on the DE-9IM. This spatial predicate will\n be used to filter data such that predicate(a, b) where b is the queried geometry.\n relates: Specify exactly how the queried geometry should relate to the data using a\n DE-9IM string code.\n distance: The min/max distance around the queried geometry in meters.\n acquired: The min/max date and time (ISO 8601) when data was acquired.\n instrumentation: The instrumentation used to acquire at least one of these data.\n num_bands: The min/max number of bands in the raster.\n resolution: The min/max resolution of the raster.\n cloud_cover: The min/max cloud coverage of the raster.\n limit: The maximum number of results to return.\n offset: The number of results to skip.\n\n Returns:\n A list of Spatial Entries in STAC Item format.\n \"\"\"\n params = spatial_search_params(\n query=query,\n predicate=predicate,\n relates=relates,\n distance=distance,\n acquired=acquired,\n instrumentation=instrumentation,\n limit=limit,\n offset=offset,\n )\n\n if num_bands and len(num_bands) == 2:\n nbmin, nbmax = num_bands\n params['num_bands_min'] = nbmin\n params['num_bands_max'] = nbmax\n\n if resolution and len(resolution) == 2:\n rmin, rmax = resolution\n params['resolution_min'] = rmin\n params['resolution_max'] = rmax\n\n if cloud_cover and len(cloud_cover) == 2:\n ccmin, ccmax = cloud_cover\n params['cloud_cover_min'] = ccmin\n params['cloud_cover_max'] = ccmax\n\n return list(limit_offset_pager(self.session, 'rgd_imagery/raster/search', params=params))\n",
"path": "rgd-client/rgd_client/rgdc.py"
}
] | diff --git a/.github/workflows/rgd-client.yml b/.github/workflows/rgd-client.yml
index adec6aa96..c9a35469b 100644
--- a/.github/workflows/rgd-client.yml
+++ b/.github/workflows/rgd-client.yml
@@ -5,25 +5,44 @@ on:
pull_request:
branches: "**"
jobs:
- deploy:
+ rgd-client:
runs-on: ubuntu-latest
+ services:
+ postgres:
+ image: postgis/postgis:latest
+ env:
+ POSTGRES_DB: django
+ POSTGRES_PASSWORD: postgres
+ ports:
+ - 5432:5432
+ rabbitmq:
+ image: rabbitmq:management
+ ports:
+ - 5672:5672
+ minio:
+ # This image does not require any command arguments (which GitHub Actions don't support)
+ image: bitnami/minio:latest
+ env:
+ MINIO_ACCESS_KEY: minioAccessKey
+ MINIO_SECRET_KEY: minioSecretKey
+ ports:
+ - 9000:9000
steps:
- uses: actions/checkout@v2
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: "3.8"
- - name: Install dependencies
+ - name: Install tox
run: |
- python -m pip install --upgrade pip
- pip install setuptools pytest
- - name: Install rgd-client
- working-directory: ./rgd-client
- run: pip install -e .
- - name: Test import
- working-directory: ./rgd-client
+ pip install --upgrade pip
+ pip install tox
+ - name: Test rgd-client
run: |
- python -c "from rgd_client import Rgdc; print('Import went okay!')"
- # - name: Test
- # working-directory: ./rgd-client
- # run: pytest . -v
+ tox -e test-rgd-client
+ env:
+ MINIO_STORAGE_ENDPOINT: localhost:9000
+ MINIO_STORAGE_ACCESS_KEY: minioAccessKey
+ MINIO_STORAGE_SECRET_KEY: minioSecretKey
+ CELERY_BROKER_URL: amqp://localhost:5672/
+ DATABASE_HOST: localhost
diff --git a/rgd-client/rgd_client/rgdc.py b/rgd-client/rgd_client/rgdc.py
index 5f624ab98..f1574bcb8 100644
--- a/rgd-client/rgd_client/rgdc.py
+++ b/rgd-client/rgd_client/rgdc.py
@@ -238,7 +238,11 @@ def search(
limit=limit,
offset=offset,
)
- return list(limit_offset_pager(self.session, 'rgd/search', params=params))
+
+ r = self.session.get('rgd/search', params=params)
+ r.raise_for_status()
+
+ return r.json()
def create_raster_stac(self, raster: Dict) -> Dict:
"""Create a raster entry using STAC format."""
diff --git a/rgd-client/tests/__init__.py b/rgd-client/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/rgd-client/tests/conftest.py b/rgd-client/tests/conftest.py
new file mode 100644
index 000000000..6eaad55a1
--- /dev/null
+++ b/rgd-client/tests/conftest.py
@@ -0,0 +1,25 @@
+from django.contrib.auth.models import User
+import pytest
+
+from rgd_client import Rgdc
+
+from .data_fixtures import generate_fixtures
+
+# dynamic fixtures: populate commands
+for (name, fixture) in generate_fixtures():
+ globals()[name] = fixture
+
+
[email protected]
+def py_client(live_server):
+
+ params = {'username': '[email protected]', 'email': '[email protected]', 'password': 'password'}
+
+ user = User.objects.create_user(is_staff=True, is_superuser=True, **params)
+ user.save()
+
+ client = Rgdc(
+ username=params['username'], password=params['password'], api_url=f'{live_server.url}/api'
+ )
+
+ return client
diff --git a/rgd-client/tests/data_fixtures.py b/rgd-client/tests/data_fixtures.py
new file mode 100644
index 000000000..5a663f295
--- /dev/null
+++ b/rgd-client/tests/data_fixtures.py
@@ -0,0 +1,21 @@
+from django.core.management import call_command
+import pytest
+
+sources = [
+ 'rgd_3d_demo',
+ 'rgd_fmv_demo',
+ 'rgd_geometry_demo',
+ 'rgd_imagery_demo',
+]
+
+# dynamically creates fixtures for above management commands
+
+
+def generate_fixtures():
+ for s in sources:
+
+ @pytest.fixture
+ def data_fixture():
+ call_command(s)
+
+ yield (s, data_fixture)
diff --git a/rgd-client/tests/manage.py b/rgd-client/tests/manage.py
new file mode 100755
index 000000000..9e883ec7a
--- /dev/null
+++ b/rgd-client/tests/manage.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python
+import os
+import sys
+
+from django.core.management import execute_from_command_line
+
+
+def main() -> None:
+ os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_project.settings')
+ execute_from_command_line(sys.argv)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/rgd-client/tests/test_client.py b/rgd-client/tests/test_client.py
new file mode 100644
index 000000000..f1e89aa94
--- /dev/null
+++ b/rgd-client/tests/test_client.py
@@ -0,0 +1,80 @@
+import json
+
+import pytest
+
+bbox = {
+ 'type': 'Polygon',
+ 'coordinates': [
+ [
+ [-105.45091240368326, 39.626245373878696],
+ [-105.45091240368326, 39.929904289147274],
+ [-104.88775649170178, 39.929904289147274],
+ [-104.88775649170178, 39.626245373878696],
+ [-105.45091240368326, 39.626245373878696],
+ ]
+ ],
+}
+
+
[email protected]_db(transaction=True)
+def test_basic_search(py_client, rgd_imagery_demo):
+
+ q = py_client.search(query=json.dumps(bbox), predicate='intersects')
+
+ assert any(x['subentry_name'] == 'afie_1.jpg' for x in q)
+
+ assert any(
+ x['subentry_name'] == 'LC08_L1TP_034032_20200429_20200509_01_T1_sr_band1.tif' for x in q
+ )
+
+
[email protected]_db(transaction=True)
+def test_inspect_raster(py_client, rgd_imagery_demo):
+
+ q = py_client.search(query=json.dumps(bbox), predicate='intersects')
+
+ raster_meta = next(
+ (
+ x
+ for x in q
+ if x['subentry_name'] == 'LC08_L1TP_034032_20200429_20200509_01_T1_sr_band1.tif'
+ ),
+ None,
+ )
+
+ assert raster_meta is not None
+
+ try:
+ raster = py_client.get_raster(raster_meta)
+ count = len(raster['parent_raster']['image_set']['images'])
+ except Exception:
+ pytest.fail('Failed to get raster from meta')
+
+ for i in range(count):
+ try:
+ py_client.download_raster_thumbnail(raster_meta, band=i)
+ except Exception:
+ pytest.fail(f'Failed to download raster thumbnail {i}')
+
+
[email protected]_db(transaction=True)
+def test_download_raster(py_client, rgd_imagery_demo):
+
+ q = py_client.search(query=json.dumps(bbox), predicate='intersects')
+
+ assert len(q) >= 1
+
+ try:
+ py_client.download_raster(q[0])
+ except Exception as e:
+ print(e)
+ pytest.fail('Failed to download raster image set')
+
+
+# TODO: figure out TemplateDoesNotExist error
+# def test_basic_stac_search(rgd_imagery_demo):
+
+# try:
+# py_client.search_raster_stac(query=json.dumps(bbox), predicate='intersects')
+# except Exception:
+# pytest.fail('Failed STAC search')
diff --git a/rgd-client/tests/test_project/__init__.py b/rgd-client/tests/test_project/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/rgd-client/tests/test_project/celery.py b/rgd-client/tests/test_project/celery.py
new file mode 100644
index 000000000..aaf507cb7
--- /dev/null
+++ b/rgd-client/tests/test_project/celery.py
@@ -0,0 +1,17 @@
+import os
+
+from celery import Celery
+
+# import configurations.importer
+#
+os.environ['DJANGO_SETTINGS_MODULE'] = 'test_project.settings'
+# if not os.environ.get('DJANGO_CONFIGURATION'):
+# raise ValueError('The environment variable "DJANGO_CONFIGURATION" must be set.')
+# configurations.importer.install()
+
+# Using a string config_source means the worker doesn't have to serialize
+# the configuration object to child processes.
+app = Celery('test_project', config_source='django.conf:settings', namespace='CELERY')
+
+# Load task modules from all registered Django app configs.
+app.autodiscover_tasks()
diff --git a/rgd-client/tests/test_project/settings.py b/rgd-client/tests/test_project/settings.py
new file mode 100644
index 000000000..fa2fa9b7e
--- /dev/null
+++ b/rgd-client/tests/test_project/settings.py
@@ -0,0 +1,22 @@
+from rgd_testing_utils.settings import * # noqa
+
+INSTALLED_APPS += [ # noqa
+ 'rgd_3d',
+ 'rgd_fmv',
+ 'rgd_geometry',
+ 'rgd_imagery',
+ # Swagger
+ 'drf_yasg',
+ 'django_extensions',
+]
+
+ROOT_URLCONF = 'test_project.urls'
+WSGI_APPLICATION = 'test_project.wsgi.application'
+
+# Swagger
+REFETCH_SCHEMA_WITH_AUTH = True
+REFETCH_SCHEMA_ON_LOGOUT = True
+OPERATIONS_SORTER = 'alpha'
+DEEP_LINKING = True
+
+STATIC_URL = '/static/'
diff --git a/rgd-client/tests/test_project/urls.py b/rgd-client/tests/test_project/urls.py
new file mode 100644
index 000000000..610edd7e6
--- /dev/null
+++ b/rgd-client/tests/test_project/urls.py
@@ -0,0 +1,44 @@
+from django.contrib import admin
+from django.urls import include, path, re_path
+from django.views.generic.base import RedirectView
+from drf_yasg import openapi
+from drf_yasg.views import get_schema_view
+from rest_framework import permissions
+
+urlpatterns = [
+ path('admin/', admin.site.urls),
+ path('accounts/', include('allauth.urls')),
+ path('oauth/', include('oauth2_provider.urls', namespace='oauth2_provider')),
+ path('api/s3-upload/', include('s3_file_field.urls')),
+ path('', include('rgd.urls')),
+ path('', include('rgd_3d.urls')),
+ path('', include('rgd_fmv.urls')),
+ path('', include('rgd_geometry.urls')),
+ path('', include('rgd_imagery.urls')),
+ # Redirect homepage to RGD core app homepage
+ path(r'', RedirectView.as_view(url='rgd', permanent=False), name='index'),
+]
+
+schema_view = get_schema_view(
+ openapi.Info(
+ title='ResonantGeoData API',
+ default_version='v1',
+ description='ResonantGeoData',
+ # terms_of_service='https://www.google.com/policies/terms/',
+ contact=openapi.Contact(email='[email protected]'),
+ license=openapi.License(name='Apache 2.0'),
+ ),
+ public=False,
+ permission_classes=(permissions.AllowAny,),
+ patterns=urlpatterns,
+)
+
+urlpatterns += [
+ re_path(
+ r'^swagger(?P<format>\.json|\.yaml)$',
+ schema_view.without_ui(cache_timeout=0),
+ name='schema-json',
+ ),
+ path('swagger/', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
+ path('redoc/', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
+]
diff --git a/rgd-client/tests/test_project/wsgi.py b/rgd-client/tests/test_project/wsgi.py
new file mode 100644
index 000000000..1e6481c78
--- /dev/null
+++ b/rgd-client/tests/test_project/wsgi.py
@@ -0,0 +1,7 @@
+import os
+
+from django.core.wsgi import get_wsgi_application
+
+os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'test_project.settings')
+
+application = get_wsgi_application()
diff --git a/rgd-client/tests/test_status.py b/rgd-client/tests/test_status.py
new file mode 100644
index 000000000..4c2d0b034
--- /dev/null
+++ b/rgd-client/tests/test_status.py
@@ -0,0 +1,11 @@
+import requests
+
+import rgd_client
+
+
+def test_rgd_version():
+ assert rgd_client.__version__ # Make sure not None
+
+
+def test_server_status(live_server):
+ assert requests.get(live_server.url).status_code == 200
diff --git a/rgd-client/tests/test_version.py b/rgd-client/tests/test_version.py
deleted file mode 100644
index ace72d6a4..000000000
--- a/rgd-client/tests/test_version.py
+++ /dev/null
@@ -1,5 +0,0 @@
-import rgd_client
-
-
-def test_rgd_version():
- assert rgd_client.__version__ # Make sure not None
diff --git a/testing-utils/rgd_testing_utils/settings.py b/testing-utils/rgd_testing_utils/settings.py
index e8b39e28c..2609e18d1 100644
--- a/testing-utils/rgd_testing_utils/settings.py
+++ b/testing-utils/rgd_testing_utils/settings.py
@@ -96,7 +96,10 @@
DEFAULT_FILE_STORAGE = 'minio_storage.storage.MinioMediaStorage'
MINIO_STORAGE_ENDPOINT = os.environ.get('MINIO_STORAGE_ENDPOINT', 'minio:9000')
MINIO_STORAGE_MEDIA_URL = os.environ.get(
- 'MINIO_STORAGE_MEDIA_URL', 'http://localhost:9000/django-storage'
+ 'MINIO_STORAGE_MEDIA_URL',
+ 'http://minio:9000/django-storage'
+ if MINIO_STORAGE_ENDPOINT == 'minio:9000'
+ else 'http://localhost:9000/django-storage',
)
MINIO_STORAGE_USE_HTTPS = False
MINIO_STORAGE_ACCESS_KEY = os.environ.get('MINIO_STORAGE_ACCESS_KEY', 'minioAccessKey')
diff --git a/tox.ini b/tox.ini
index 99fffa93c..4b462de46 100644
--- a/tox.ini
+++ b/tox.ini
@@ -121,6 +121,21 @@ commands =
coverage html -d {toxworkdir}/htmlcov/django-rgd-imagery
coverage xml -o {toxworkdir}/coverage.xml
+[testenv:test-rgd-client]
+changedir = rgd-client/tests
+deps =
+ -r requirements.txt
+ -e ./django-rgd
+ -e ./django-rgd-3d[worker]
+ -e ./django-rgd-fmv[worker]
+ -e ./django-rgd-geometry[worker]
+ -e ./django-rgd-imagery[worker]
+ -e ./rgd-client[dev]
+commands =
+ pytest {posargs}
+ # coverage html -d {toxworkdir}/htmlcov/django-rgd-client
+ # coverage xml -o {toxworkdir}/coverage.xml
+
[testenv:dev]
deps =
{[testenv:lint]deps}
| Proper testing for rgd_client
We need to implement real tests for the Python client.
These tests require running RGD with data prepopulated in the background, then executing the client tests against that live instance.
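A minimal sketch of what such a test could look like, assuming the `py_client` and `rgd_imagery_demo` fixtures introduced by the accompanying test suite (an authenticated `Rgdc` client pointed at a live test server, and demo imagery loaded via a management command); the polygon coordinates are only illustrative:

```py
import json

import pytest


@pytest.mark.django_db(transaction=True)
def test_search_returns_demo_imagery(py_client, rgd_imagery_demo):
    # Query a bounding box that overlaps the prepopulated demo data.
    bbox = {
        'type': 'Polygon',
        'coordinates': [[
            [-105.45, 39.63], [-105.45, 39.93],
            [-104.89, 39.93], [-104.89, 39.63],
            [-105.45, 39.63],
        ]],
    }
    results = py_client.search(query=json.dumps(bbox), predicate='intersects')
    assert len(results) >= 1
```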
|
canonical__snapcraft-80 | [
{
"content": "# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-\n#\n# Copyright (C) 2015 Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\nimport apt\nimport filecmp\nimport glob\nimport logging\nimport os\nimport shlex\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nimport time\n\nimport snapcraft.yaml\nfrom snapcraft import common\nfrom snapcraft import lifecycle\nfrom snapcraft import meta\n\nlogger = logging.getLogger(__name__)\n\n\n_TEMPLATE_YAML = r'''name: # the name of the snap\nversion: # the version of the snap\n# The vendor for the snap (replace 'Vendor <[email protected]>')\nvendor: Vendor <[email protected]>\nsummary: # 79 char long summary\ndescription: # A longer description for the snap\nicon: # A path to an icon for the package\n'''\n\n\n_config = None\n\n\ndef init(args):\n if os.path.exists('snapcraft.yaml'):\n logger.error('snapcraft.yaml already exists!')\n sys.exit(1)\n yaml = _TEMPLATE_YAML\n if args.part:\n yaml += 'parts:\\n'\n for part_name in args.part:\n part = lifecycle.load_plugin(part_name, part_name)\n yaml += ' ' + part.name + ':\\n'\n for opt in part.config.get('options', []):\n if part.config['options'][opt].get('required', False):\n yaml += ' ' + opt + ':\\n'\n yaml = yaml.strip()\n with open('snapcraft.yaml', mode='w+') as f:\n f.write(yaml)\n logger.info('Wrote the following as snapcraft.yaml.')\n print()\n print(yaml)\n sys.exit(0)\n\n\ndef shell(args):\n config = _load_config()\n common.env = config.stage_env()\n userCommand = args.userCommand\n if not userCommand:\n userCommand = ['/usr/bin/env',\n 'PS1=\\[\\e[1;32m\\]snapcraft:\\w\\$\\[\\e[0m\\] ',\n '/bin/bash',\n '--norc']\n common.run(userCommand)\n\n\ndef snap(args):\n cmd(args)\n\n # This check is to support manual assembly.\n if not os.path.exists(os.path.join(common.get_snapdir(), 'meta')):\n arches = [snapcraft.common.get_arch(), ]\n\n config = _load_config()\n\n # FIXME this should be done in a more contained manner\n common.env = config.snap_env()\n\n meta.create(config.data, arches)\n\n\ndef assemble(args):\n args.cmd = 'snap'\n # With all the data in snapcraft.yaml, maybe it's not a good idea to call\n # snap(args) and just do a snappy build if assemble was explicitly called.\n snap(args)\n common.run(['snappy', 'build', common.get_snapdir()])\n\n\ndef _find_latest_private_key():\n \"\"\"\n Find the latest private key in ~/.ssh.\n\n :returns:\n Path of the most-recently-modified private SSH key\n :raises LookupError:\n If no such key was found.\n\n This function tries to mimic the logic found in ``ubuntu-device-flash``. 
It\n will look for the most recently modified private key in the users' SSH\n configuration directory.\n \"\"\"\n candidates = []\n ssh_dir = os.path.expanduser('~/.ssh/')\n for filename in os.listdir(ssh_dir):\n # Skip public keys, we want the private key\n if filename.endswith('.pub'):\n continue\n ssh_key = os.path.join(ssh_dir, filename)\n # Skip non-files\n if not os.path.isfile(ssh_key):\n continue\n # Ensure that it is a real ssh key\n with open(ssh_key, 'rb') as stream:\n if stream.readline() != b'-----BEGIN RSA PRIVATE KEY-----\\n':\n continue\n candidates.append(ssh_key)\n # Sort the keys by modification time, pick the most recent key\n candidates.sort(key=lambda f: os.stat(f).st_mtime, reverse=True)\n logger.debug('Available ssh public keys: %r', candidates)\n if not candidates:\n raise LookupError('Unable to find any private ssh key')\n return candidates[0]\n\n\ndef run(args):\n # We are mostly making sure we are operating from the correct location. In\n # the future this could do more by using target attribute in snapcraft.yaml\n # to create the correct target image.\n _load_config()\n # Find the ssh key that ubuntu-device-flash would use so that we can use it\n # ourselves as well. This may not be the default key that the user has\n # configured.\n # See: https://bugs.launchpad.net/snapcraft/+bug/1486659\n try:\n ssh_key = _find_latest_private_key()\n except LookupError:\n logger.error('You need to have an SSH key to use this command')\n logger.error('Please generate one with ssh-keygen(1)')\n return 1\n else:\n logger.info('Using the following ssh key: %s', ssh_key)\n\n # Find available *.snap files to copy into the test VM\n snap_dir = os.path.join(os.getcwd())\n # copy the snap with the largest version number into the test VM\n snaps = glob.glob(snap_dir + '/*.snap')\n snaps.sort()\n if not snaps:\n logger.error('There are no .snap files ready')\n logger.error('Perhaps you forgot to run \"snapcraft assemble\"')\n return 1\n\n qemudir = os.path.join(os.getcwd(), 'image')\n qemu_img = os.path.join(qemudir, '15.04.img')\n if not os.path.exists(qemu_img):\n os.makedirs(qemudir, exist_ok=True)\n logger.info(\n 'Setting up virtual snappy environment, root access required')\n common.run([\n 'sudo', 'ubuntu-device-flash', 'core', '15.04', '--developer-mode',\n '--enable-ssh', '-o', os.path.relpath(qemu_img, qemudir)],\n cwd=qemudir)\n qemu = None\n try:\n # Allow the developer to provide additional arguments to qemu. 
This\n # can be used, for example, to pass through USB devices from the host.\n # This can enable a lot of hardware-specific use cases directly inside\n # the snapcraft run workflow.\n #\n # For example:\n # $ export SNAPCRAFT_RUN_QEMU_ARGS=\\\n # \"-usb -device usb-host,hostbus=1,hostaddr=10\"\n # $ snapcraft run\n qemu_args = os.getenv('SNAPCRAFT_RUN_QEMU_ARGS')\n if qemu_args is not None:\n qemu_args = shlex.split(qemu_args)\n else:\n qemu_args = []\n qemu = subprocess.Popen(\n ['kvm', '-m', '768', '-nographic', '-snapshot', '-redir',\n 'tcp:8022::22', qemu_img] + qemu_args, stdin=subprocess.PIPE)\n n = tempfile.NamedTemporaryFile()\n ssh_opts = [\n # We want to login with the specified ssh identity (key)\n '-i', ssh_key,\n # We don't want strict host checking because it's a new VM with a\n # random key each time.\n '-oStrictHostKeyChecking=no',\n # We don't want to pollute the known_hosts file with new entries\n # all the time so let's use a temporary file for that\n '-oUserKnownHostsFile={}'.format(n.name),\n # Don't try keyboard interactive authentication, we're expecting to\n # login via the key and if that doesn't work then everything else\n # will fail anyway.\n '-oKbdInteractiveAuthentication=no',\n ]\n while True:\n ret_code = _call(\n ['ssh'] + ssh_opts +\n ['ubuntu@localhost', '-p', '8022', 'true'])\n if ret_code == 0:\n break\n print('Waiting for device')\n time.sleep(1)\n # copy the most recent snap into the test VM\n _check_call(\n ['scp'] + ssh_opts + [\n '-P', '8022', snaps[-1], 'ubuntu@localhost:~/'])\n # install the snap\n _check_call(\n ['ssh'] + ssh_opts +\n ['ubuntu@localhost', '-p', '8022', 'sudo snappy install *.snap'])\n # \"login\"\n _check_call(\n ['ssh'] + ssh_opts + ['-p', '8022', 'ubuntu@localhost'],\n preexec_fn=os.setsid)\n finally:\n if qemu:\n qemu.kill()\n\n\ndef list_plugins(args=None):\n import pkgutil\n import snapcraft.plugins\n\n for importer, modname, is_package in pkgutil.iter_modules(\n snapcraft.plugins.__path__):\n if not is_package:\n print(modname.replace('_', '-'))\n\n\ndef clean(args):\n config = _load_config()\n\n for part in config.all_parts:\n logger.info('Cleaning up for part %r', part.name)\n if os.path.exists(part.partdir):\n shutil.rmtree(part.partdir)\n\n # parts dir does not contain only generated code.\n if (os.path.exists(common.get_partsdir()) and\n not os.listdir(common.get_partsdir())):\n os.rmdir(common.get_partsdir())\n\n logger.info('Cleaning up staging area')\n if os.path.exists(common.get_stagedir()):\n shutil.rmtree(common.get_stagedir())\n\n logger.info('Cleaning up snapping area')\n if os.path.exists(common.get_snapdir()):\n shutil.rmtree(common.get_snapdir())\n\n\ndef _check_for_collisions(parts):\n parts_files = {}\n for part in parts:\n # Gather our own files up\n fileset = getattr(part.code.options, 'stage', ['*']) or ['*']\n part_files, _ = lifecycle.migratable_filesets(\n fileset,\n part.installdir)\n\n # Scan previous parts for collisions\n for other_part_name in parts_files:\n common = part_files & parts_files[other_part_name]['files']\n conflict_files = []\n for f in common:\n this = os.path.join(part.installdir, f)\n other = os.path.join(\n parts_files[other_part_name]['installdir'],\n f)\n if os.path.islink(this) and os.path.islink(other):\n continue\n if not filecmp.cmp(this, other, shallow=False):\n conflict_files.append(f)\n\n if conflict_files:\n logger.error('Error: parts %s and %s have the following file '\n 'paths in common which have different '\n 'contents:\\n %s',\n other_part_name,\n part.name,\n 
'\\n '.join(sorted(conflict_files)))\n\n return False\n\n # And add our files to the list\n parts_files[part.name] = {'files': part_files,\n 'installdir': part.installdir}\n\n return True\n\n\ndef cmd(args):\n forceAll = args.force\n forceCommand = None\n\n cmds = [args.cmd]\n\n if cmds[0] in common.COMMAND_ORDER:\n forceCommand = cmds[0]\n cmds = common.COMMAND_ORDER[0:common.COMMAND_ORDER.index(cmds[0]) + 1]\n\n config = _load_config()\n _install_build_packages(config.build_tools)\n\n # clean the snap dir before Snapping\n snap_clean = False\n\n for part in config.all_parts:\n for cmd in cmds:\n if cmd is 'stage':\n # This ends up running multiple times, as each part gets to its\n # staging cmd. That's inefficient, but largely OK.\n # FIXME: fix the above by iterating over cmds before iterating\n # all_parts. But then we need to make sure we continue to\n # handle cases like go, where you want go built before trying\n # to pull a go project.\n if not _check_for_collisions(config.all_parts):\n sys.exit(1)\n\n # We want to make sure we have a clean snap dir\n if cmd is 'snap' and not snap_clean:\n shutil.rmtree(common.get_snapdir())\n snap_clean = True\n\n common.env = config.build_env_for_part(part)\n force = forceAll or cmd == forceCommand\n\n try:\n getattr(part, cmd)(force=force)\n except Exception as e:\n logger.error('Failed doing %s for %s: %s', cmd, part.name, e)\n sys.exit(1)\n\n\ndef _call(args, **kwargs):\n logger.info('Running: %s', ' '.join(shlex.quote(arg) for arg in args))\n return subprocess.call(args, **kwargs)\n\n\ndef _check_call(args, **kwargs):\n logger.info('Running: %s', ' '.join(shlex.quote(arg) for arg in args))\n return subprocess.check_call(args, **kwargs)\n\n\ndef _install_build_packages(packages):\n new_packages = []\n for pkg in packages:\n try:\n if not apt.Cache()[pkg].installed:\n new_packages.append(pkg)\n except KeyError:\n logger.error('Could not find all the \"build-packages\" required '\n 'in snapcraft.yaml')\n sys.exit(1)\n if new_packages:\n logger.info('Installing required packages on the host system')\n _check_call(['sudo', 'apt-get', '-o', 'Dpkg::Progress-Fancy=1',\n '--no-install-recommends',\n '-y', 'install'] + new_packages)\n\n\ndef _load_config():\n global _config\n if _config:\n return _config\n\n try:\n _config = snapcraft.yaml.Config()\n return _config\n except snapcraft.yaml.SnapcraftYamlFileError as e:\n logger.error(\n 'Could not find {}. Are you sure you are in the right '\n 'directory?\\nTo start a new project, use \\'snapcraft '\n 'init\\''.format(e.file))\n sys.exit(1)\n except snapcraft.yaml.SnapcraftSchemaError as e:\n msg = 'Issues while validating snapcraft.yaml: {}'.format(e.message)\n logger.error(msg)\n sys.exit(1)\n except snapcraft.yaml.PluginNotDefinedError as e:\n logger.error(\n 'Issues while validating snapcraft.yaml: the \"plugin\" keyword is '\n 'missing for the \"{}\" part.'.format(e.part))\n sys.exit(1)\n except snapcraft.yaml.SnapcraftLogicError as e:\n logger.error('Issue detected while analyzing '\n 'snapcraft.yaml: {}'.format(e.message))\n sys.exit(1)\n except lifecycle.PluginError as e:\n logger.error('Issue while loading plugin: {}'.format(e))\n",
"path": "snapcraft/cmds.py"
}
] | [
{
"content": "# -*- Mode:Python; indent-tabs-mode:nil; tab-width:4 -*-\n#\n# Copyright (C) 2015 Canonical Ltd\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License version 3 as\n# published by the Free Software Foundation.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see <http://www.gnu.org/licenses/>.\n\nimport apt\nimport filecmp\nimport glob\nimport logging\nimport os\nimport shlex\nimport shutil\nimport subprocess\nimport sys\nimport tempfile\nimport time\n\nimport snapcraft.yaml\nfrom snapcraft import common\nfrom snapcraft import lifecycle\nfrom snapcraft import meta\n\nlogger = logging.getLogger(__name__)\n\n\n_TEMPLATE_YAML = r'''name: # the name of the snap\nversion: # the version of the snap\n# The vendor for the snap (replace 'Vendor <[email protected]>')\nvendor: Vendor <[email protected]>\nsummary: # 79 char long summary\ndescription: # A longer description for the snap\nicon: # A path to an icon for the package\n'''\n\n\n_config = None\n\n\ndef init(args):\n if os.path.exists('snapcraft.yaml'):\n logger.error('snapcraft.yaml already exists!')\n sys.exit(1)\n yaml = _TEMPLATE_YAML\n if args.part:\n yaml += 'parts:\\n'\n for part_name in args.part:\n part = lifecycle.load_plugin(part_name, part_name)\n yaml += ' ' + part.name + ':\\n'\n for opt in part.config.get('options', []):\n if part.config['options'][opt].get('required', False):\n yaml += ' ' + opt + ':\\n'\n yaml = yaml.strip()\n with open('snapcraft.yaml', mode='w+') as f:\n f.write(yaml)\n logger.info('Wrote the following as snapcraft.yaml.')\n print()\n print(yaml)\n sys.exit(0)\n\n\ndef shell(args):\n config = _load_config()\n common.env = config.stage_env()\n userCommand = args.userCommand\n if not userCommand:\n userCommand = ['/usr/bin/env',\n 'PS1=\\[\\e[1;32m\\]snapcraft:\\w\\$\\[\\e[0m\\] ',\n '/bin/bash',\n '--norc']\n common.run(userCommand)\n\n\ndef snap(args):\n cmd(args)\n\n # This check is to support manual assembly.\n if not os.path.exists(os.path.join(common.get_snapdir(), 'meta')):\n arches = [snapcraft.common.get_arch(), ]\n\n config = _load_config()\n\n # FIXME this should be done in a more contained manner\n common.env = config.snap_env()\n\n meta.create(config.data, arches)\n\n\ndef assemble(args):\n args.cmd = 'snap'\n # With all the data in snapcraft.yaml, maybe it's not a good idea to call\n # snap(args) and just do a snappy build if assemble was explicitly called.\n snap(args)\n common.run(['snappy', 'build', common.get_snapdir()])\n\n\ndef _find_latest_private_key():\n \"\"\"\n Find the latest private key in ~/.ssh.\n\n :returns:\n Path of the most-recently-modified private SSH key\n :raises LookupError:\n If no such key was found.\n\n This function tries to mimic the logic found in ``ubuntu-device-flash``. 
It\n will look for the most recently modified private key in the users' SSH\n configuration directory.\n \"\"\"\n candidates = []\n ssh_dir = os.path.expanduser('~/.ssh/')\n for filename in os.listdir(ssh_dir):\n # Skip public keys, we want the private key\n if filename.endswith('.pub'):\n continue\n ssh_key = os.path.join(ssh_dir, filename)\n # Skip non-files\n if not os.path.isfile(ssh_key):\n continue\n # Ensure that it is a real ssh key\n with open(ssh_key, 'rb') as stream:\n if stream.readline() != b'-----BEGIN RSA PRIVATE KEY-----\\n':\n continue\n candidates.append(ssh_key)\n # Sort the keys by modification time, pick the most recent key\n candidates.sort(key=lambda f: os.stat(f).st_mtime, reverse=True)\n logger.debug('Available ssh public keys: %r', candidates)\n if not candidates:\n raise LookupError('Unable to find any private ssh key')\n return candidates[0]\n\n\ndef run(args):\n # We are mostly making sure we are operating from the correct location. In\n # the future this could do more by using target attribute in snapcraft.yaml\n # to create the correct target image.\n _load_config()\n # Find the ssh key that ubuntu-device-flash would use so that we can use it\n # ourselves as well. This may not be the default key that the user has\n # configured.\n # See: https://bugs.launchpad.net/snapcraft/+bug/1486659\n try:\n ssh_key = _find_latest_private_key()\n except LookupError:\n logger.error('You need to have an SSH key to use this command')\n logger.error('Please generate one with ssh-keygen(1)')\n return 1\n else:\n logger.info('Using the following ssh key: %s', ssh_key)\n\n # Find available *.snap files to copy into the test VM\n snap_dir = os.path.join(os.getcwd())\n # copy the snap with the largest version number into the test VM\n snaps = glob.glob(snap_dir + '/*.snap')\n snaps.sort()\n if not snaps:\n logger.error('There are no .snap files ready')\n logger.error('Perhaps you forgot to run \"snapcraft assemble\"')\n return 1\n\n qemudir = os.path.join(os.getcwd(), 'image')\n qemu_img = os.path.join(qemudir, '15.04.img')\n if not os.path.exists(qemu_img):\n os.makedirs(qemudir, exist_ok=True)\n logger.info(\n 'Setting up virtual snappy environment, root access required')\n common.run([\n 'sudo', 'ubuntu-device-flash', 'core', '15.04', '--developer-mode',\n '--enable-ssh', '-o', os.path.relpath(qemu_img, qemudir)],\n cwd=qemudir)\n qemu = None\n try:\n # Allow the developer to provide additional arguments to qemu. 
This\n # can be used, for example, to pass through USB devices from the host.\n # This can enable a lot of hardware-specific use cases directly inside\n # the snapcraft run workflow.\n #\n # For example:\n # $ export SNAPCRAFT_RUN_QEMU_ARGS=\\\n # \"-usb -device usb-host,hostbus=1,hostaddr=10\"\n # $ snapcraft run\n qemu_args = os.getenv('SNAPCRAFT_RUN_QEMU_ARGS')\n if qemu_args is not None:\n qemu_args = shlex.split(qemu_args)\n else:\n qemu_args = []\n qemu = subprocess.Popen(\n ['kvm', '-m', '768', '-nographic', '-snapshot', '-redir',\n 'tcp:8022::22', qemu_img] + qemu_args, stdin=subprocess.PIPE)\n n = tempfile.NamedTemporaryFile()\n ssh_opts = [\n # We want to login with the specified ssh identity (key)\n '-i', ssh_key,\n # We don't want strict host checking because it's a new VM with a\n # random key each time.\n '-oStrictHostKeyChecking=no',\n # We don't want to pollute the known_hosts file with new entries\n # all the time so let's use a temporary file for that\n '-oUserKnownHostsFile={}'.format(n.name),\n # Don't try keyboard interactive authentication, we're expecting to\n # login via the key and if that doesn't work then everything else\n # will fail anyway.\n '-oKbdInteractiveAuthentication=no',\n ]\n while True:\n ret_code = _call(\n ['ssh'] + ssh_opts +\n ['ubuntu@localhost', '-p', '8022', 'true'])\n if ret_code == 0:\n break\n print('Waiting for device')\n time.sleep(1)\n # copy the most recent snap into the test VM\n _check_call(\n ['scp'] + ssh_opts + [\n '-P', '8022', snaps[-1], 'ubuntu@localhost:~/'])\n # install the snap\n _check_call(\n ['ssh'] + ssh_opts +\n ['ubuntu@localhost', '-p', '8022', 'sudo snappy install *.snap'])\n # \"login\"\n _call(\n ['ssh'] + ssh_opts + ['-p', '8022', 'ubuntu@localhost'],\n preexec_fn=os.setsid)\n finally:\n if qemu:\n qemu.kill()\n\n\ndef list_plugins(args=None):\n import pkgutil\n import snapcraft.plugins\n\n for importer, modname, is_package in pkgutil.iter_modules(\n snapcraft.plugins.__path__):\n if not is_package:\n print(modname.replace('_', '-'))\n\n\ndef clean(args):\n config = _load_config()\n\n for part in config.all_parts:\n logger.info('Cleaning up for part %r', part.name)\n if os.path.exists(part.partdir):\n shutil.rmtree(part.partdir)\n\n # parts dir does not contain only generated code.\n if (os.path.exists(common.get_partsdir()) and\n not os.listdir(common.get_partsdir())):\n os.rmdir(common.get_partsdir())\n\n logger.info('Cleaning up staging area')\n if os.path.exists(common.get_stagedir()):\n shutil.rmtree(common.get_stagedir())\n\n logger.info('Cleaning up snapping area')\n if os.path.exists(common.get_snapdir()):\n shutil.rmtree(common.get_snapdir())\n\n\ndef _check_for_collisions(parts):\n parts_files = {}\n for part in parts:\n # Gather our own files up\n fileset = getattr(part.code.options, 'stage', ['*']) or ['*']\n part_files, _ = lifecycle.migratable_filesets(\n fileset,\n part.installdir)\n\n # Scan previous parts for collisions\n for other_part_name in parts_files:\n common = part_files & parts_files[other_part_name]['files']\n conflict_files = []\n for f in common:\n this = os.path.join(part.installdir, f)\n other = os.path.join(\n parts_files[other_part_name]['installdir'],\n f)\n if os.path.islink(this) and os.path.islink(other):\n continue\n if not filecmp.cmp(this, other, shallow=False):\n conflict_files.append(f)\n\n if conflict_files:\n logger.error('Error: parts %s and %s have the following file '\n 'paths in common which have different '\n 'contents:\\n %s',\n other_part_name,\n part.name,\n '\\n 
'.join(sorted(conflict_files)))\n\n return False\n\n # And add our files to the list\n parts_files[part.name] = {'files': part_files,\n 'installdir': part.installdir}\n\n return True\n\n\ndef cmd(args):\n forceAll = args.force\n forceCommand = None\n\n cmds = [args.cmd]\n\n if cmds[0] in common.COMMAND_ORDER:\n forceCommand = cmds[0]\n cmds = common.COMMAND_ORDER[0:common.COMMAND_ORDER.index(cmds[0]) + 1]\n\n config = _load_config()\n _install_build_packages(config.build_tools)\n\n # clean the snap dir before Snapping\n snap_clean = False\n\n for part in config.all_parts:\n for cmd in cmds:\n if cmd is 'stage':\n # This ends up running multiple times, as each part gets to its\n # staging cmd. That's inefficient, but largely OK.\n # FIXME: fix the above by iterating over cmds before iterating\n # all_parts. But then we need to make sure we continue to\n # handle cases like go, where you want go built before trying\n # to pull a go project.\n if not _check_for_collisions(config.all_parts):\n sys.exit(1)\n\n # We want to make sure we have a clean snap dir\n if cmd is 'snap' and not snap_clean:\n shutil.rmtree(common.get_snapdir())\n snap_clean = True\n\n common.env = config.build_env_for_part(part)\n force = forceAll or cmd == forceCommand\n\n try:\n getattr(part, cmd)(force=force)\n except Exception as e:\n logger.error('Failed doing %s for %s: %s', cmd, part.name, e)\n sys.exit(1)\n\n\ndef _call(args, **kwargs):\n logger.info('Running: %s', ' '.join(shlex.quote(arg) for arg in args))\n return subprocess.call(args, **kwargs)\n\n\ndef _check_call(args, **kwargs):\n logger.info('Running: %s', ' '.join(shlex.quote(arg) for arg in args))\n return subprocess.check_call(args, **kwargs)\n\n\ndef _install_build_packages(packages):\n new_packages = []\n for pkg in packages:\n try:\n if not apt.Cache()[pkg].installed:\n new_packages.append(pkg)\n except KeyError:\n logger.error('Could not find all the \"build-packages\" required '\n 'in snapcraft.yaml')\n sys.exit(1)\n if new_packages:\n logger.info('Installing required packages on the host system')\n _check_call(['sudo', 'apt-get', '-o', 'Dpkg::Progress-Fancy=1',\n '--no-install-recommends',\n '-y', 'install'] + new_packages)\n\n\ndef _load_config():\n global _config\n if _config:\n return _config\n\n try:\n _config = snapcraft.yaml.Config()\n return _config\n except snapcraft.yaml.SnapcraftYamlFileError as e:\n logger.error(\n 'Could not find {}. Are you sure you are in the right '\n 'directory?\\nTo start a new project, use \\'snapcraft '\n 'init\\''.format(e.file))\n sys.exit(1)\n except snapcraft.yaml.SnapcraftSchemaError as e:\n msg = 'Issues while validating snapcraft.yaml: {}'.format(e.message)\n logger.error(msg)\n sys.exit(1)\n except snapcraft.yaml.PluginNotDefinedError as e:\n logger.error(\n 'Issues while validating snapcraft.yaml: the \"plugin\" keyword is '\n 'missing for the \"{}\" part.'.format(e.part))\n sys.exit(1)\n except snapcraft.yaml.SnapcraftLogicError as e:\n logger.error('Issue detected while analyzing '\n 'snapcraft.yaml: {}'.format(e.message))\n sys.exit(1)\n except lifecycle.PluginError as e:\n logger.error('Issue while loading plugin: {}'.format(e))\n",
"path": "snapcraft/cmds.py"
}
] | diff --git a/snapcraft/cmds.py b/snapcraft/cmds.py
index 96f7f7d2ac..5dd8d6c39b 100644
--- a/snapcraft/cmds.py
+++ b/snapcraft/cmds.py
@@ -229,7 +229,7 @@ def run(args):
['ssh'] + ssh_opts +
['ubuntu@localhost', '-p', '8022', 'sudo snappy install *.snap'])
# "login"
- _check_call(
+ _call(
['ssh'] + ssh_opts + ['-p', '8022', 'ubuntu@localhost'],
preexec_fn=os.setsid)
finally:
| Logging out from the run environment produces a traceback
(amd64)ubuntu@localhost:~$ logout
Connection to localhost closed.
Traceback (most recent call last):
File "/usr/bin/snapcraft", line 33, in <module>
snapcraft.main.main()
File "/usr/lib/python3/dist-packages/snapcraft/main.py", line 80, in main
args.func(args)
File "/usr/lib/python3/dist-packages/snapcraft/cmds.py", line 228, in run
preexec_fn=os.setsid)
File "/usr/lib/python3/dist-packages/snapcraft/cmds.py", line 343, in _check_call
return subprocess.check_call(args, **kwargs)
File "/usr/lib/python3.4/subprocess.py", line 561, in check_call
raise CalledProcessError(retcode, cmd)
subprocess.CalledProcessError: Command '['ssh', '-i', '/home/daniel/.ssh/ubuntudevice_0149BDCB0C009017_id_rsa', '-oStrictHostKeyChecking=no', '-oUserKnownHostsFile=/tmp/tmpcaocvoj7', '-oKbdInteractiveAuthentication=no', '-p', '8022', 'ubuntu@localhost']' returned non-zero exit status 1
daniel@daydream:~/dev/apps/bwm-ng.snap$
Launchpad Details: [#LP1499242](https://bugs.launchpad.net/bugs/1499242) Daniel Holbach - 2015-09-24 06:05:27 -0300
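The failure comes from wrapping the interactive ssh "login" in `_check_call` (a thin wrapper over `subprocess.check_call`), which raises `CalledProcessError` whenever the session ends with a non-zero status, e.g. after `logout`. A minimal sketch of the difference, independent of snapcraft itself:

```py
import subprocess

# subprocess.call returns the exit status; a non-zero status from an
# interactive session that simply ended is not treated as an error.
rc = subprocess.call(['sh', '-c', 'exit 1'])
print(rc)  # 1

# subprocess.check_call raises on the same status, which is what
# produced the traceback above.
try:
    subprocess.check_call(['sh', '-c', 'exit 1'])
except subprocess.CalledProcessError as exc:
    print('raised with status', exc.returncode)
```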
|
ManimCommunity__manim-3510 | [
{
"content": "#!/usr/bin/env python\n\n\nfrom manim import *\n\n# To watch one of these scenes, run the following:\n# python --quality m manim -p example_scenes.py SquareToCircle\n#\n# Use the flag --quality l for a faster rendering at a lower quality.\n# Use -s to skip to the end and just save the final frame\n# Use the -p to have preview of the animation (or image, if -s was\n# used) pop up once done.\n# Use -n <number> to skip ahead to the nth animation of a scene.\n# Use -r <number> to specify a resolution (for example, -r 1920,1080\n# for a 1920x1080 video)\n\n\nclass OpeningManim(Scene):\n def construct(self):\n title = Tex(r\"This is some \\LaTeX\")\n basel = MathTex(r\"\\sum_{n=1}^\\infty \\frac{1}{n^2} = \\frac{\\pi^2}{6}\")\n VGroup(title, basel).arrange(DOWN)\n self.play(\n Write(title),\n FadeIn(basel, shift=DOWN),\n )\n self.wait()\n\n transform_title = Tex(\"That was a transform\")\n transform_title.to_corner(UP + LEFT)\n self.play(\n Transform(title, transform_title),\n LaggedStart(*(FadeOut(obj, shift=DOWN) for obj in basel)),\n )\n self.wait()\n\n grid = NumberPlane()\n grid_title = Tex(\"This is a grid\", font_size=72)\n grid_title.move_to(transform_title)\n\n self.add(grid, grid_title) # Make sure title is on top of grid\n self.play(\n FadeOut(title),\n FadeIn(grid_title, shift=UP),\n Create(grid, run_time=3, lag_ratio=0.1),\n )\n self.wait()\n\n grid_transform_title = Tex(\n r\"That was a non-linear function \\\\ applied to the grid\",\n )\n grid_transform_title.move_to(grid_title, UL)\n grid.prepare_for_nonlinear_transform()\n self.play(\n grid.animate.apply_function(\n lambda p: p\n + np.array(\n [\n np.sin(p[1]),\n np.sin(p[0]),\n 0,\n ],\n ),\n ),\n run_time=3,\n )\n self.wait()\n self.play(Transform(grid_title, grid_transform_title))\n self.wait()\n\n\nclass SquareToCircle(Scene):\n def construct(self):\n circle = Circle()\n square = Square()\n square.flip(RIGHT)\n square.rotate(-3 * TAU / 8)\n circle.set_fill(PINK, opacity=0.5)\n\n self.play(Create(square))\n self.play(Transform(square, circle))\n self.play(FadeOut(square))\n\n\nclass WarpSquare(Scene):\n def construct(self):\n square = Square()\n self.play(\n ApplyPointwiseFunction(\n lambda point: complex_to_R3(np.exp(R3_to_complex(point))),\n square,\n ),\n )\n self.wait()\n\n\nclass WriteStuff(Scene):\n def construct(self):\n example_text = Tex(\"This is a some text\", tex_to_color_map={\"text\": YELLOW})\n example_tex = MathTex(\n \"\\\\sum_{k=1}^\\\\infty {1 \\\\over k^2} = {\\\\pi^2 \\\\over 6}\",\n )\n group = VGroup(example_text, example_tex)\n group.arrange(DOWN)\n group.width = config[\"frame_width\"] - 2 * LARGE_BUFF\n\n self.play(Write(example_text))\n self.play(Write(example_tex))\n self.wait()\n\n\nclass UpdatersExample(Scene):\n def construct(self):\n decimal = DecimalNumber(\n 0,\n show_ellipsis=True,\n num_decimal_places=3,\n include_sign=True,\n )\n square = Square().to_edge(UP)\n\n decimal.add_updater(lambda d: d.next_to(square, RIGHT))\n decimal.add_updater(lambda d: d.set_value(square.get_center()[1]))\n self.add(square, decimal)\n self.play(\n square.animate.to_edge(DOWN),\n rate_func=there_and_back,\n run_time=5,\n )\n self.wait()\n\n\nclass SpiralInExample(Scene):\n def construct(self):\n logo_green = \"#81b29a\"\n logo_blue = \"#454866\"\n logo_red = \"#e07a5f\"\n\n font_color = \"#ece6e2\"\n\n pi = MathTex(r\"\\pi\").scale(7).set_color(font_color)\n pi.shift(2.25 * LEFT + 1.5 * UP)\n\n circle = Circle(color=logo_green, fill_opacity=0.7, stroke_width=0).shift(LEFT)\n square = 
Square(color=logo_blue, fill_opacity=0.8, stroke_width=0).shift(UP)\n triangle = Triangle(color=logo_red, fill_opacity=0.9, stroke_width=0).shift(\n RIGHT\n )\n pentagon = Polygon(\n *[\n [np.cos(2 * np.pi / 5 * i), np.sin(2 * np.pi / 5 * i), 0]\n for i in range(5)\n ],\n color=PURPLE_B,\n fill_opacity=1,\n stroke_width=0\n ).shift(UP + 2 * RIGHT)\n shapes = VGroup(triangle, square, circle, pentagon, pi)\n self.play(SpiralIn(shapes, fade_in_fraction=0.9))\n self.wait()\n self.play(FadeOut(shapes))\n\n\nTriangle.set_default(stroke_width=20)\n\n\nclass LineJoints(Scene):\n def construct(self):\n t1 = Triangle()\n t2 = Triangle(line_join=LineJointType.ROUND)\n t3 = Triangle(line_join=LineJointType.BEVEL)\n\n grp = VGroup(t1, t2, t3).arrange(RIGHT)\n grp.set(width=config.frame_width - 1)\n\n self.add(grp)\n\n\n# See many more examples at https://docs.manim.community/en/stable/examples.html\n",
"path": "example_scenes/basic.py"
}
] | [
{
"content": "#!/usr/bin/env python\n\n\nfrom manim import *\n\n# To watch one of these scenes, run the following:\n# python --quality m manim -p example_scenes.py SquareToCircle\n#\n# Use the flag --quality l for a faster rendering at a lower quality.\n# Use -s to skip to the end and just save the final frame\n# Use the -p to have preview of the animation (or image, if -s was\n# used) pop up once done.\n# Use -n <number> to skip ahead to the nth animation of a scene.\n# Use -r <number> to specify a resolution (for example, -r 1920,1080\n# for a 1920x1080 video)\n\n\nclass OpeningManim(Scene):\n def construct(self):\n title = Tex(r\"This is some \\LaTeX\")\n basel = MathTex(r\"\\sum_{n=1}^\\infty \\frac{1}{n^2} = \\frac{\\pi^2}{6}\")\n VGroup(title, basel).arrange(DOWN)\n self.play(\n Write(title),\n FadeIn(basel, shift=DOWN),\n )\n self.wait()\n\n transform_title = Tex(\"That was a transform\")\n transform_title.to_corner(UP + LEFT)\n self.play(\n Transform(title, transform_title),\n LaggedStart(*(FadeOut(obj, shift=DOWN) for obj in basel)),\n )\n self.wait()\n\n grid = NumberPlane()\n grid_title = Tex(\"This is a grid\", font_size=72)\n grid_title.move_to(transform_title)\n\n self.add(grid, grid_title) # Make sure title is on top of grid\n self.play(\n FadeOut(title),\n FadeIn(grid_title, shift=UP),\n Create(grid, run_time=3, lag_ratio=0.1),\n )\n self.wait()\n\n grid_transform_title = Tex(\n r\"That was a non-linear function \\\\ applied to the grid\",\n )\n grid_transform_title.move_to(grid_title, UL)\n grid.prepare_for_nonlinear_transform()\n self.play(\n grid.animate.apply_function(\n lambda p: p\n + np.array(\n [\n np.sin(p[1]),\n np.sin(p[0]),\n 0,\n ],\n ),\n ),\n run_time=3,\n )\n self.wait()\n self.play(Transform(grid_title, grid_transform_title))\n self.wait()\n\n\nclass SquareToCircle(Scene):\n def construct(self):\n circle = Circle()\n square = Square()\n square.flip(RIGHT)\n square.rotate(-3 * TAU / 8)\n circle.set_fill(PINK, opacity=0.5)\n\n self.play(Create(square))\n self.play(Transform(square, circle))\n self.play(FadeOut(square))\n\n\nclass WarpSquare(Scene):\n def construct(self):\n square = Square()\n self.play(\n ApplyPointwiseFunction(\n lambda point: complex_to_R3(np.exp(R3_to_complex(point))),\n square,\n ),\n )\n self.wait()\n\n\nclass WriteStuff(Scene):\n def construct(self):\n example_text = Tex(\"This is a some text\", tex_to_color_map={\"text\": YELLOW})\n example_tex = MathTex(\n \"\\\\sum_{k=1}^\\\\infty {1 \\\\over k^2} = {\\\\pi^2 \\\\over 6}\",\n )\n group = VGroup(example_text, example_tex)\n group.arrange(DOWN)\n group.width = config[\"frame_width\"] - 2 * LARGE_BUFF\n\n self.play(Write(example_text))\n self.play(Write(example_tex))\n self.wait()\n\n\nclass UpdatersExample(Scene):\n def construct(self):\n decimal = DecimalNumber(\n 0,\n show_ellipsis=True,\n num_decimal_places=3,\n include_sign=True,\n )\n square = Square().to_edge(UP)\n\n decimal.add_updater(lambda d: d.next_to(square, RIGHT))\n decimal.add_updater(lambda d: d.set_value(square.get_center()[1]))\n self.add(square, decimal)\n self.play(\n square.animate.to_edge(DOWN),\n rate_func=there_and_back,\n run_time=5,\n )\n self.wait()\n\n\nclass SpiralInExample(Scene):\n def construct(self):\n logo_green = \"#81b29a\"\n logo_blue = \"#454866\"\n logo_red = \"#e07a5f\"\n\n font_color = \"#ece6e2\"\n\n pi = MathTex(r\"\\pi\").scale(7).set_color(font_color)\n pi.shift(2.25 * LEFT + 1.5 * UP)\n\n circle = Circle(color=logo_green, fill_opacity=0.7, stroke_width=0).shift(LEFT)\n square = 
Square(color=logo_blue, fill_opacity=0.8, stroke_width=0).shift(UP)\n triangle = Triangle(color=logo_red, fill_opacity=0.9, stroke_width=0).shift(\n RIGHT\n )\n pentagon = Polygon(\n *[\n [np.cos(2 * np.pi / 5 * i), np.sin(2 * np.pi / 5 * i), 0]\n for i in range(5)\n ],\n color=PURPLE_B,\n fill_opacity=1,\n stroke_width=0\n ).shift(UP + 2 * RIGHT)\n shapes = VGroup(triangle, square, circle, pentagon, pi)\n self.play(SpiralIn(shapes, fade_in_fraction=0.9))\n self.wait()\n self.play(FadeOut(shapes))\n\n\nTriangle.set_default(stroke_width=20)\n\n\nclass LineJoints(Scene):\n def construct(self):\n t1 = Triangle()\n t2 = Triangle(joint_type=LineJointType.ROUND)\n t3 = Triangle(joint_type=LineJointType.BEVEL)\n\n grp = VGroup(t1, t2, t3).arrange(RIGHT)\n grp.set(width=config.frame_width - 1)\n\n self.add(grp)\n\n\n# See many more examples at https://docs.manim.community/en/stable/examples.html\n",
"path": "example_scenes/basic.py"
}
] | diff --git a/example_scenes/basic.py b/example_scenes/basic.py
index cf5f475223..e61ae01f67 100644
--- a/example_scenes/basic.py
+++ b/example_scenes/basic.py
@@ -167,8 +167,8 @@ def construct(self):
class LineJoints(Scene):
def construct(self):
t1 = Triangle()
- t2 = Triangle(line_join=LineJointType.ROUND)
- t3 = Triangle(line_join=LineJointType.BEVEL)
+ t2 = Triangle(joint_type=LineJointType.ROUND)
+ t3 = Triangle(joint_type=LineJointType.BEVEL)
grp = VGroup(t1, t2, t3).arrange(RIGHT)
grp.set(width=config.frame_width - 1)
| keyword argument 'line_join'
## Description of bug / unexpected behavior
When rendering the LineJoints scene from the basic.py example scenes, it fails with `Mobject.__init__() got an unexpected keyword argument 'line_join'`.
## How to reproduce the issue
<details><summary>Code for reproducing the problem</summary>
```py
class LineJoints(Scene):
    def construct(self):
        t1 = Triangle()
        t2 = Triangle(line_join=LineJointType.ROUND)
        t3 = Triangle(line_join=LineJointType.BEVEL)
        grp = VGroup(t1, t2, t3).arrange(RIGHT)
        grp.set(width=config.frame_width - 1)
        self.add(grp)
```
</details>
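For reference, the accompanying diff renames the keyword: `Polygon`-derived mobjects take `joint_type`, not `line_join`. A corrected version of the reproduction (the same scene, just with the renamed keyword) would be:

```py
from manim import *


class LineJoints(Scene):
    def construct(self):
        t1 = Triangle()
        # `joint_type` is the accepted keyword, per the diff above.
        t2 = Triangle(joint_type=LineJointType.ROUND)
        t3 = Triangle(joint_type=LineJointType.BEVEL)

        grp = VGroup(t1, t2, t3).arrange(RIGHT)
        grp.set(width=config.frame_width - 1)
        self.add(grp)
```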
## Logs
<details><summary>Visual Studio Code output</summary>
```
╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮
│ C:\tools\Manim\Lib\site-packages\manim\cli\render\commands.py:115 in render │
│ │
│ 112 │ │ │ try: │
│ 113 │ │ │ │ with tempconfig({}): │
│ 114 │ │ │ │ │ scene = SceneClass() │
│ ❱ 115 │ │ │ │ │ scene.render() │
│ 116 │ │ │ except Exception: │
│ 117 │ │ │ │ error_console.print_exception() │
│ 118 │ │ │ │ sys.exit(1) │
│ │
│ C:\tools\Manim\Lib\site-packages\manim\scene\scene.py:223 in render │
│ │
│ 220 │ │ """ │
│ 221 │ │ self.setup() │
│ 222 │ │ try: │
│ ❱ 223 │ │ │ self.construct() │
│ 224 │ │ except EndSceneEarlyException: │
│ 225 │ │ │ pass │
│ 226 │ │ except RerunSceneException as e: │
│ │
│ C:\Users\HP\Documents\ManimCE\basic.py:170 in construct │
│ │
│ 167 class LineJoints(Scene): │
│ 168 │ def construct(self): │
│ 169 │ │ t1 = Triangle() │
│ ❱ 170 │ │ t2 = Triangle(line_join=LineJointType.ROUND) │
│ 171 │ │ t3 = Triangle(line_join=LineJointType.BEVEL) │
│ 172 │ │ │
│ 173 │ │ grp = VGroup(t1, t2, t3).arrange(RIGHT) │
│ │
│ C:\tools\Manim\Lib\site-packages\manim\mobject\geometry\polygram.py:559 in __init__ │
│ │
│ 556 │ """ │
│ 557 │ │
│ 558 │ def __init__(self, **kwargs): │
│ ❱ 559 │ │ super().__init__(n=3, **kwargs) │
│ 560 │
│ 561 │
│ 562 class Rectangle(Polygon): │
│ │
│ C:\tools\Manim\Lib\site-packages\manim\mobject\geometry\polygram.py:428 in __init__ │
│ │
│ 425 │ """ │
│ 426 │ │
│ 427 │ def __init__(self, n: int = 6, **kwargs): │
│ ❱ 428 │ │ super().__init__(n, density=1, **kwargs) │
│ 429 │
│ 430 │
│ 431 class Star(Polygon): │
│ │
│ C:\tools\Manim\Lib\site-packages\manim\mobject\geometry\polygram.py:399 in __init__ │
│ │
│ 396 │ │ │ │
│ 397 │ │ │ vertex_groups.append(group) │
│ 398 │ │ │
│ ❱ 399 │ │ super().__init__(*vertex_groups, **kwargs) │
│ 400 │
│ 401 │
│ 402 class RegularPolygon(RegularPolygram): │
│ │
│ C:\tools\Manim\Lib\site-packages\manim\mobject\geometry\polygram.py:69 in __init__ │
│ │
│ 66 │ """ │
│ 67 │ │
│ 68 │ def __init__(self, *vertex_groups: Iterable[Sequence[float]], color=BLUE, **kwargs): │
│ ❱ 69 │ │ super().__init__(color=color, **kwargs) │
│ 70 │ │ │
│ 71 │ │ for vertices in vertex_groups: │
│ 72 │ │ │ first_vertex, *vertices = vertices │
│ │
│ C:\tools\Manim\Lib\site-packages\manim\mobject\types\vectorized_mobject.py:125 in __init__ │
│ │
│ 122 │ │ self.shade_in_3d = shade_in_3d │
│ 123 │ │ self.tolerance_for_point_equality = tolerance_for_point_equality │
│ 124 │ │ self.n_points_per_cubic_curve = n_points_per_cubic_curve │
│ ❱ 125 │ │ super().__init__(**kwargs) │
│ 126 │ │ │
│ 127 │ │ if fill_color: │
│ 128 │ │ │ self.fill_color = fill_color │
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯
TypeError: Mobject.__init__() got an unexpected keyword argument 'line_join'
```
</details>
<details><summary>CMD output</summary>
Traceback (most recent call last):
File "C:\Users\HP\Documents\ManimCE\mce\Lib\site-packages\numpy\core\__init__.py", line 24, in <module>
from . import multiarray
File "C:\Users\HP\Documents\ManimCE\mce\Lib\site-packages\numpy\core\multiarray.py", line 10, in <module>
from . import overrides
File "C:\Users\HP\Documents\ManimCE\mce\Lib\site-packages\numpy\core\overrides.py", line 8, in <module>
from numpy.core._multiarray_umath import (
ModuleNotFoundError: No module named 'numpy.core._multiarray_umath'
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "C:\Users\HP\Documents\ManimCE\mce\Lib\site-packages\numpy\__init__.py", line 158, in <module>
from numpy.__config__ import show as show_config
File "C:\Users\HP\Documents\ManimCE\mce\Lib\site-packages\numpy\__config__.py", line 4, in <module>
from numpy.core._multiarray_umath import (
File "C:\Users\HP\Documents\ManimCE\mce\Lib\site-packages\numpy\core\__init__.py", line 50, in <module>
raise ImportError(msg)
ImportError:
IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE!
Importing the numpy C-extensions failed. This error can happen for
many reasons, often due to issues with your setup or how NumPy was
installed.
We have compiled some common reasons and troubleshooting tips at:
https://numpy.org/devdocs/user/troubleshooting-importerror.html
Please note and check the following:
* The Python version is: Python3.11 from "C:\Users\HP\Documents\ManimCE\mce\Scripts\python.exe"
* The NumPy version is: "1.26.0"
and make sure that they are the versions you expect.
Please carefully study the documentation linked above for further help.
Original error was: No module named 'numpy.core._multiarray_umath'
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "<frozen runpy>", line 198, in _run_module_as_main
File "<frozen runpy>", line 88, in _run_code
File "C:\Users\HP\Documents\ManimCE\mce\Scripts\manim.exe\__main__.py", line 4, in <module>
File "C:\Users\HP\Documents\ManimCE\mce\Lib\site-packages\manim\__init__.py", line 17, in <module>
from ._config import *
File "C:\Users\HP\Documents\ManimCE\mce\Lib\site-packages\manim\_config\__init__.py", line 10, in <module>
from .utils import ManimConfig, ManimFrame, make_config_parser
File "C:\Users\HP\Documents\ManimCE\mce\Lib\site-packages\manim\_config\utils.py", line 27, in <module>
import numpy as np
File "C:\Users\HP\Documents\ManimCE\mce\Lib\site-packages\numpy\__init__.py", line 163, in <module>
raise ImportError(msg) from e
ImportError: Error importing numpy: you should not try to import numpy from
its source directory; please exit the numpy source tree, and relaunch
your python interpreter from there.
</details>
## System specifications
<details><summary>System Details</summary>
- OS: Windows 10
- Python version (3.11.5)
|
deepchecks__deepchecks-728 | [
{
"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"\n\n|build| |Documentation Status| |pkgVersion| |pyVersions|\n|Maintainability| |Coverage Status|\n\n.. image:: https://raw.githubusercontent.com/deepchecks/deepchecks/main/docs/images/deepchecks-logo-with-white-wide-back.png\n :target: https://github.com/deepchecks/deepchecks\n\nDeepchecks is a Python package for comprehensively validating your machine learning models and data with minimal effort.\nThis includes checks related to various types of issues, such as model performance, data integrity,\ndistribution mismatches, and more.\n\nWhat Do You Need in Order to Start Validating?\n----------------------------------------------\n\nDepending on your phase and what you wise to validate, you'll need a\nsubset of the following:\n\n- Raw data (before pre-processing such as OHE, string processing,\n etc.), with optional labels\n\n- The model's training data with labels\n\n- Test data (which the model isn't exposed to) with labels\n\n- A model compatible with scikit-learn API that you wish to validate\n (e.g. RandomForest, XGBoost)\n\nDeepchecks validation accompanies you from the initial phase when you\nhave only raw data, through the data splits, and to the final stage of\nhaving a trained model that you wish to evaluate. Accordingly, each\nphase requires different assets for the validation. See more about\ntypical usage scenarios and the built-in suites in the\n`docs <https://docs.deepchecks.com/?utm_source=pypi.org&utm_medium=referral&utm_campaign=readme>`__.\n\nInstallation\n------------\n\nUsing pip\n~~~~~~~~~\n\n.. code:: bash\n\n pip install deepchecks #--upgrade --user\n\nUsing conda\n~~~~~~~~~~~\n\n.. code:: bash\n\n conda install -c deepchecks deepchecks\n\n.. |build| image:: https://github.com/deepchecks/deepchecks/actions/workflows/build.yml/badge.svg\n.. |Documentation Status| image:: https://readthedocs.org/projects/deepchecks/badge/?version=latest\n :target: https://docs.deepchecks.com/en/latest/?badge=latest\n.. |pkgVersion| image:: https://img.shields.io/pypi/v/deepchecks\n.. |pyVersions| image:: https://img.shields.io/pypi/pyversions/deepchecks\n.. |Maintainability| image:: https://api.codeclimate.com/v1/badges/970b11794144139975fa/maintainability\n :target: https://codeclimate.com/github/deepchecks/deepchecks/maintainability\n.. 
|Coverage Status| image:: https://coveralls.io/repos/github/deepchecks/deepchecks/badge.svg?branch=main\n :target: https://coveralls.io/github/deepchecks/deepchecks?branch=main\n\n\"\"\"\n\nimport setuptools\nfrom setuptools import setup\nfrom distutils.util import convert_path\nimport os\n\nmain_ns = {}\nDOCLINES = (__doc__ or '').split(\"\\n\")\n\nwith open(os.path.join('./', 'VERSION')) as version_file:\n VER = version_file.read().strip()\n\nrequirementPath = os.path.dirname(os.path.realpath(__file__)) + '/requirements.txt'\ninstall_requires = []\nif os.path.isfile(requirementPath):\n with open(requirementPath) as f:\n install_requires = f.read().splitlines()\n\n\n\n\nsetup(\n name='deepchecks',\n version=VER,\n packages=setuptools.find_packages(),\n install_requires=install_requires,\n license_files = ('LICENSE', ),\n description = DOCLINES[0],\n long_description=\"\\n\".join(DOCLINES[2:]),\n author = 'deepchecks', \n author_email = '[email protected]', \n url = 'https://github.com/deepchecks/deepchecks',\n download_url = \"https://github.com/deepchecks/deepchecks/releases/download/{0}/deepchecks-{0}.tar.gz\".format(VER),\n keywords = ['Software Development', 'Machine Learning'],\n include_package_data=True,\n classifiers = [\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n ],\n)\n",
"path": "setup.py"
}
] | [
{
"content": "# ----------------------------------------------------------------------------\n# Copyright (C) 2021 Deepchecks (https://www.deepchecks.com)\n#\n# This file is part of Deepchecks.\n# Deepchecks is distributed under the terms of the GNU Affero General\n# Public License (version 3 or later).\n# You should have received a copy of the GNU Affero General Public License\n# along with Deepchecks. If not, see <http://www.gnu.org/licenses/>.\n# ----------------------------------------------------------------------------\n#\n\"\"\"\n\n|build| |Documentation Status| |pkgVersion| |pyVersions|\n|Maintainability| |Coverage Status|\n\n.. image:: https://raw.githubusercontent.com/deepchecks/deepchecks/main/docs/images/deepchecks-logo-with-white-wide-back.png\n :target: https://github.com/deepchecks/deepchecks\n\nDeepchecks is a Python package for comprehensively validating your machine learning models and data with minimal effort.\nThis includes checks related to various types of issues, such as model performance, data integrity,\ndistribution mismatches, and more.\n\nWhat Do You Need in Order to Start Validating?\n----------------------------------------------\n\nDepending on your phase and what you wise to validate, you'll need a\nsubset of the following:\n\n- Raw data (before pre-processing such as OHE, string processing,\n etc.), with optional labels\n\n- The model's training data with labels\n\n- Test data (which the model isn't exposed to) with labels\n\n- A model compatible with scikit-learn API that you wish to validate\n (e.g. RandomForest, XGBoost)\n\nDeepchecks validation accompanies you from the initial phase when you\nhave only raw data, through the data splits, and to the final stage of\nhaving a trained model that you wish to evaluate. Accordingly, each\nphase requires different assets for the validation. See more about\ntypical usage scenarios and the built-in suites in the\n`docs <https://docs.deepchecks.com/?utm_source=pypi.org&utm_medium=referral&utm_campaign=readme>`__.\n\nInstallation\n------------\n\nUsing pip\n~~~~~~~~~\n\n.. code:: bash\n\n pip install deepchecks #--upgrade --user\n\nUsing conda\n~~~~~~~~~~~\n\n.. code:: bash\n\n conda install -c deepchecks deepchecks\n\n.. |build| image:: https://github.com/deepchecks/deepchecks/actions/workflows/build.yml/badge.svg\n.. |Documentation Status| image:: https://readthedocs.org/projects/deepchecks/badge/?version=latest\n :target: https://docs.deepchecks.com/en/latest/?badge=latest\n.. |pkgVersion| image:: https://img.shields.io/pypi/v/deepchecks\n.. |pyVersions| image:: https://img.shields.io/pypi/pyversions/deepchecks\n.. |Maintainability| image:: https://api.codeclimate.com/v1/badges/970b11794144139975fa/maintainability\n :target: https://codeclimate.com/github/deepchecks/deepchecks/maintainability\n.. 
|Coverage Status| image:: https://coveralls.io/repos/github/deepchecks/deepchecks/badge.svg?branch=main\n :target: https://coveralls.io/github/deepchecks/deepchecks?branch=main\n\n\"\"\"\n\nimport setuptools\nfrom setuptools import setup\nimport os\n\nmain_ns = {}\nDOCLINES = (__doc__ or '').split(\"\\n\")\n\nwith open(os.path.join('./', 'VERSION')) as version_file:\n VER = version_file.read().strip()\n\nrequirementPath = os.path.dirname(os.path.realpath(__file__)) + '/requirements.txt'\ninstall_requires = []\nif os.path.isfile(requirementPath):\n with open(requirementPath) as f:\n install_requires = f.read().splitlines()\n\nsetup(\n name='deepchecks',\n version=VER,\n packages=setuptools.find_packages(),\n install_requires=install_requires,\n license_files = ('LICENSE', ),\n description = DOCLINES[0],\n long_description=\"\\n\".join(DOCLINES[2:]),\n author = 'deepchecks', \n author_email = '[email protected]', \n url = 'https://github.com/deepchecks/deepchecks',\n download_url = \"https://github.com/deepchecks/deepchecks/releases/download/{0}/deepchecks-{0}.tar.gz\".format(VER),\n keywords = ['Software Development', 'Machine Learning'],\n include_package_data=True,\n classifiers = [\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'License :: OSI Approved :: GNU Affero General Public License v3 or later (AGPLv3+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n 'Programming Language :: Python :: 3.9',\n 'Programming Language :: Python :: 3.10',\n ],\n)\n",
"path": "setup.py"
}
] | diff --git a/MANIFEST.in b/MANIFEST.in
index e3c8bf6744..b7e63381bf 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,5 @@
include requirements.txt
include LICENSE
include VERSION
-include README.md
\ No newline at end of file
+include README.md
+recursive-include deepchecks/base/resources *
\ No newline at end of file
diff --git a/VERSION b/VERSION
index 9e11b32fca..d15723fbe8 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-0.3.1
+0.3.2
diff --git a/setup.py b/setup.py
index 110f44af8c..96c21c42d6 100644
--- a/setup.py
+++ b/setup.py
@@ -74,7 +74,6 @@
import setuptools
from setuptools import setup
-from distutils.util import convert_path
import os
main_ns = {}
@@ -89,9 +88,6 @@
with open(requirementPath) as f:
install_requires = f.read().splitlines()
-
-
-
setup(
name='deepchecks',
version=VER,
| [BUG] resources/suite_output.html file is missing when deepchecks is installed via pip rather than from git
**Describe the bug**
can't use save_as_html because suite_output.html file is missing
**To Reproduce**
pip install deepchecks
suite_result.save_as_html()
**Expected behavior**
The suite result should be saved as an HTML file.
**Environment (please complete the following information):**
- OS: linux
- Python Version: 3.7
- Deepchecks Version: 0.3.1
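For what it's worth, a hedged way to check from an installed copy whether the template was actually packaged (the `deepchecks/base/resources` path comes from the MANIFEST.in change in the diff above; the expected file is the `suite_output.html` named in this report):

```py
import os

import deepchecks

# List whatever shipped under deepchecks/base/resources in the installed package;
# after the MANIFEST.in fix this listing should include suite_output.html.
resources_dir = os.path.join(os.path.dirname(deepchecks.__file__), "base", "resources")
print(sorted(os.listdir(resources_dir)))
```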
|
zostera__django-bootstrap3-90 | [
{
"content": "from __future__ import unicode_literals\n\nfrom django.contrib.admin.widgets import AdminFileWidget\nfrom django.forms import HiddenInput, FileInput, CheckboxSelectMultiple, Textarea, TextInput, RadioSelect, \\\n CheckboxInput, ClearableFileInput\nfrom django.forms.extras import SelectDateWidget\nfrom django.forms.forms import BaseForm, BoundField\nfrom django.forms.formsets import BaseFormSet\nfrom django.utils.encoding import force_text\nfrom django.utils.html import conditional_escape, strip_tags\n\nfrom .bootstrap import get_bootstrap_setting\nfrom .text import text_concat\nfrom .exceptions import BootstrapError\nfrom .html import add_css_class, render_tag\nfrom .icons import render_icon\n\n\nFORM_GROUP_CLASS = 'form-group'\n\n\ndef render_formset(formset, **kwargs):\n \"\"\"\n Render a formset to a Bootstrap layout\n \"\"\"\n if not isinstance(formset, BaseFormSet):\n raise BootstrapError('Parameter \"formset\" should contain a valid Django FormSet.')\n forms = [render_form(f, **kwargs) for f in formset]\n return force_text(formset.management_form) + '\\n' + '\\n'.join(forms)\n\n\ndef render_form(form, layout='', form_group_class=FORM_GROUP_CLASS, field_class='', label_class='', show_help=True,\n exclude='', set_required=True):\n \"\"\"\n Render a formset to a Bootstrap layout\n \"\"\"\n if not isinstance(form, BaseForm):\n raise BootstrapError('Parameter \"form\" should contain a valid Django Form.')\n html = ''\n errors = []\n fields = []\n for field in form:\n fields.append(render_field(\n field,\n layout=layout,\n form_group_class=form_group_class,\n field_class=field_class,\n label_class=label_class,\n show_help=show_help,\n exclude=exclude,\n set_required=set_required,\n ))\n if field.is_hidden and field.errors:\n errors += field.errors\n errors += form.non_field_errors()\n if errors:\n html += '''<div class=\"alert alert-danger alert-dismissable alert-link\">\n <button class=close data-dismiss=alert aria-hidden=true>\n ×</button>{errors}</div>\\n\n '''.format(errors='\\n'.join(['<p>{e}</p>'.format(e=e) for e in errors]))\n return html + '\\n'.join(fields)\n\n\ndef render_field(field, layout='', form_group_class=FORM_GROUP_CLASS,\n field_class=None, label_class=None, show_label=True,\n show_help=True, exclude='', set_required=True):\n \"\"\"\n Render a formset to a Bootstrap layout\n \"\"\"\n if not isinstance(field, BoundField):\n raise BootstrapError('Parameter \"field\" should contain a valid Django BoundField.')\n # See if we're not excluded\n if field.name in exclude.replace(' ', '').split(','):\n return ''\n # Hidden input requires no special treatment\n if field.is_hidden:\n return force_text(field)\n # Shortcut to widget\n widget = field.field.widget\n # Read widgets attributes\n widget_attrs = {\n 'class': widget.attrs.get('class', ''),\n 'placeholder': widget.attrs.get('placeholder', ''),\n 'title': widget.attrs.get('title', ''),\n }\n # Class to add to field element\n if isinstance(widget, FileInput):\n form_control_class = ''\n else:\n form_control_class = 'form-control'\n # Optional extra rendering\n after_render = None\n # Wrap rendered field in its own label?\n put_inside_label = False\n # Wrapper for the final result (should contain {content} if not empty)\n wrapper = ''\n\n # Adjust workings for various widget types\n if isinstance(field.field.widget, CheckboxInput):\n form_control_class = ''\n put_inside_label = True\n wrapper = '<div class=\"checkbox\">{content}</div>'\n elif isinstance(widget, RadioSelect):\n form_control_class = ''\n after_render 
= list_to_class('radio')\n elif isinstance(widget, CheckboxSelectMultiple):\n form_control_class = ''\n after_render = list_to_class('checkbox')\n elif isinstance(widget, SelectDateWidget):\n after_render = fix_date_select_input\n elif isinstance(widget, ClearableFileInput):\n after_render = fix_clearable_file_input\n\n # Get help text\n field_help = force_text(field.help_text) if show_help and field.help_text else ''\n # Get errors\n field_errors = [conditional_escape(force_text(error)) for error in field.errors]\n # Temporarily adjust widget attributes if necessary\n if form_control_class:\n widget.attrs['class'] = add_css_class(widget_attrs['class'], form_control_class)\n if is_widget_with_placeholder(widget) and field.label and not put_inside_label and not widget_attrs['placeholder']:\n widget.attrs['placeholder'] = field.label\n if field_help and not put_inside_label and not widget_attrs['title']:\n widget.attrs['title'] = strip_tags(field_help)\n if layout == 'inline' and field_errors:\n field_title = widget.attrs.get('title', '')\n field_title += ' ' + ' '.join([strip_tags(e) for e in field_errors])\n widget.attrs['title'] = field_title.strip()\n # Set required attribute\n if set_required and is_widget_required_attribute(widget):\n widget.attrs['required'] = 'required'\n # Render the field\n rendered_field = field.as_widget(attrs=widget.attrs)\n # Apply the post_processor\n if after_render:\n rendered_field = after_render(rendered_field)\n # Return changed attributes to original settings\n for attr in widget_attrs:\n widget.attrs[attr] = widget_attrs[attr]\n # Wrap the rendered field in its label if necessary\n if put_inside_label:\n rendered_field = render_label(\n content='{field} {label}'.format(field=rendered_field, label=field.label),\n label_title=field.help_text\n )\n # Add any help text and/or errors\n if layout != 'inline':\n help_text_and_errors = [field_help] + field_errors\n if help_text_and_errors:\n help_html = ' '.join([h for h in help_text_and_errors if h])\n rendered_field += '<span class=help-block>{help}</span>'.format(help=help_html)\n # Wrap the rendered field\n if wrapper:\n rendered_field = wrapper.format(content=rendered_field)\n # Prepare label\n label = field.label\n if put_inside_label:\n label = None\n if layout == 'inline' or not show_label:\n label_class = add_css_class(label_class, 'sr-only')\n # Render label and field\n content = render_field_and_label(\n field=rendered_field,\n label=label,\n field_class=field_class,\n label_class=label_class,\n layout=layout,\n )\n # Return combined content, wrapped in form control\n if field.errors:\n form_group_class = add_css_class(form_group_class, 'has-error')\n elif field.form.is_bound:\n form_group_class = add_css_class(form_group_class, 'has-success')\n\n return render_form_group(content, form_group_class)\n\n\ndef render_label(content, label_for=None, label_class=None, label_title=''):\n \"\"\"\n Render a label with content\n \"\"\"\n attrs = {}\n if label_for:\n attrs['for'] = label_for\n if label_class:\n attrs['class'] = label_class\n if label_title:\n attrs['title'] = label_title\n return render_tag('label', attrs=attrs, content=content)\n\n\ndef render_button(content, button_type=None, icon=None):\n attrs = {'class': 'btn'}\n if button_type:\n if button_type == 'submit':\n attrs['class'] += ' btn-primary'\n elif button_type != 'reset' and button_type != 'button':\n raise BootstrapError('Parameter \"button_type\" should be \"submit\", \"reset\", \"button\" or empty.')\n attrs['type'] = button_type\n 
icon_content = render_icon(icon) if icon else ''\n return render_tag('button', attrs=attrs, content=text_concat(icon_content, content, separator=' '))\n\n\ndef render_field_and_label(field, label, field_class='', label_class='', layout='', **kwargs):\n # Default settings for horizontal form\n if layout == 'horizontal':\n if not label_class:\n label_class = get_bootstrap_setting('horizontal_label_class')\n if not field_class:\n field_class = get_bootstrap_setting('horizontal_field_class')\n if not label:\n label = ' '\n label_class = add_css_class(label_class, 'control-label')\n html = field\n if field_class:\n html = '<div class=\"{klass}\">{html}</div>'.format(klass=field_class, html=html)\n if label:\n html = render_label(label, label_class=label_class) + html\n return html\n\n\ndef render_form_group(content, css_class=FORM_GROUP_CLASS):\n \"\"\"\n Render a Bootstrap form group\n \"\"\"\n return '<div class=\"{klass}\">{content}</div>'.format(\n klass=css_class,\n content=content,\n )\n\n\ndef is_widget_required_attribute(widget):\n \"\"\"\n Is this widget required?\n \"\"\"\n if not get_bootstrap_setting('set_required'):\n return False\n if not widget.is_required:\n return False\n if isinstance(widget, (AdminFileWidget, HiddenInput, FileInput, CheckboxSelectMultiple)):\n return False\n return True\n\n\ndef is_widget_with_placeholder(widget):\n \"\"\"\n Is this a widget that should have a placeholder?\n Only text, search, url, tel, e-mail, password, number have placeholders\n These are all derived form TextInput, except for Textarea\n \"\"\"\n return isinstance(widget, (TextInput, Textarea))\n\n\ndef list_to_class(klass):\n def fixer(html):\n mapping = [\n ('<ul', '<div'),\n ('</ul>', '</div>'),\n ('<li', '<div class=\"{klass}\"'.format(klass=klass)),\n ('</li>', '</div>'),\n ]\n for k, v in mapping:\n html = html.replace(k, v)\n return html\n\n return fixer\n\n\ndef surround_with(html_with_content):\n def wrapper(html):\n return html_with_content.format(content=html)\n\n return wrapper\n\n\ndef fix_date_select_input(html):\n div1 = '<div class=\"col-xs-4\">'\n div2 = '</div>'\n html = html.replace('<select', div1 + '<select')\n html = html.replace('</select>', '</select>' + div2)\n return '<div class=\"row bootstrap3-multi-input\">' + html + '</div>'\n\n\ndef fix_clearable_file_input(html):\n \"\"\"\n Fix a clearable file input\n TODO: This needs improvement\n\n Currently Django returns\n Currently: <a href=\"dummy.txt\">dummy.txt</a> <input id=\"file4-clear_id\" name=\"file4-clear\" type=\"checkbox\" /> <label for=\"file4-clear_id\">Clear</label><br />Change: <input id=\"id_file4\" name=\"file4\" type=\"file\" /><span class=help-block></span></div>\n\n \"\"\"\n # TODO This needs improvement\n return '<div class=\"row bootstrap3-multi-input\"><div class=\"col-xs-12\">' + html + '</div></div>'\n",
"path": "bootstrap3/forms.py"
}
] | [
{
"content": "from __future__ import unicode_literals\n\nfrom django.contrib.admin.widgets import AdminFileWidget\nfrom django.forms import HiddenInput, FileInput, CheckboxSelectMultiple, Textarea, TextInput, RadioSelect, \\\n CheckboxInput, ClearableFileInput\nfrom django.forms.extras import SelectDateWidget\nfrom django.forms.forms import BaseForm, BoundField\nfrom django.forms.formsets import BaseFormSet\nfrom django.utils.encoding import force_text\nfrom django.utils.html import conditional_escape, strip_tags\n\nfrom .bootstrap import get_bootstrap_setting\nfrom .text import text_concat\nfrom .exceptions import BootstrapError\nfrom .html import add_css_class, render_tag\nfrom .icons import render_icon\n\n\nFORM_GROUP_CLASS = 'form-group'\n\n\ndef render_formset(formset, **kwargs):\n \"\"\"\n Render a formset to a Bootstrap layout\n \"\"\"\n if not isinstance(formset, BaseFormSet):\n raise BootstrapError('Parameter \"formset\" should contain a valid Django FormSet.')\n forms = [render_form(f, **kwargs) for f in formset]\n return force_text(formset.management_form) + '\\n' + '\\n'.join(forms)\n\n\ndef render_form(form, layout='', form_group_class=FORM_GROUP_CLASS, field_class='', label_class='', show_help=True,\n exclude='', set_required=True):\n \"\"\"\n Render a formset to a Bootstrap layout\n \"\"\"\n if not isinstance(form, BaseForm):\n raise BootstrapError('Parameter \"form\" should contain a valid Django Form.')\n html = ''\n errors = []\n fields = []\n for field in form:\n fields.append(render_field(\n field,\n layout=layout,\n form_group_class=form_group_class,\n field_class=field_class,\n label_class=label_class,\n show_help=show_help,\n exclude=exclude,\n set_required=set_required,\n ))\n if field.is_hidden and field.errors:\n errors += field.errors\n errors += form.non_field_errors()\n if errors:\n html += '''<div class=\"alert alert-danger alert-dismissable alert-link\">\n <button class=close data-dismiss=alert aria-hidden=true>\n ×</button>{errors}</div>\\n\n '''.format(errors='\\n'.join(['<p>{e}</p>'.format(e=e) for e in errors]))\n return html + '\\n'.join(fields)\n\n\ndef render_field(field, layout='', form_group_class=FORM_GROUP_CLASS,\n field_class=None, label_class=None, show_label=True,\n show_help=True, exclude='', set_required=True):\n \"\"\"\n Render a formset to a Bootstrap layout\n \"\"\"\n if not isinstance(field, BoundField):\n raise BootstrapError('Parameter \"field\" should contain a valid Django BoundField.')\n # See if we're not excluded\n if field.name in exclude.replace(' ', '').split(','):\n return ''\n # Hidden input requires no special treatment\n if field.is_hidden:\n return force_text(field)\n # Shortcut to widget\n widget = field.field.widget\n # Read widgets attributes\n widget_attrs = {\n 'class': widget.attrs.get('class', ''),\n 'placeholder': widget.attrs.get('placeholder', ''),\n 'title': widget.attrs.get('title', ''),\n }\n # Class to add to field element\n if isinstance(widget, FileInput):\n form_control_class = ''\n else:\n form_control_class = 'form-control'\n # Optional extra rendering\n after_render = None\n # Wrap rendered field in its own label?\n put_inside_label = False\n # Wrapper for the final result (should contain {content} if not empty)\n wrapper = ''\n\n # Adjust workings for various widget types\n if isinstance(field.field.widget, CheckboxInput):\n form_control_class = ''\n put_inside_label = True\n wrapper = '<div class=\"checkbox\">{content}</div>'\n elif isinstance(widget, RadioSelect):\n form_control_class = ''\n after_render 
= list_to_class('radio')\n elif isinstance(widget, CheckboxSelectMultiple):\n form_control_class = ''\n after_render = list_to_class('checkbox')\n elif isinstance(widget, SelectDateWidget):\n after_render = fix_date_select_input\n elif isinstance(widget, ClearableFileInput):\n after_render = fix_clearable_file_input\n\n # Get help text\n field_help = force_text(field.help_text) if show_help and field.help_text else ''\n # Get errors\n field_errors = [conditional_escape(force_text(error)) for error in field.errors]\n # Temporarily adjust widget attributes if necessary\n if form_control_class:\n widget.attrs['class'] = add_css_class(widget_attrs['class'], form_control_class)\n if is_widget_with_placeholder(widget) and field.label and not put_inside_label and not widget_attrs['placeholder']:\n widget.attrs['placeholder'] = field.label\n if field_help and not put_inside_label and not widget_attrs['title']:\n widget.attrs['title'] = strip_tags(field_help)\n if layout == 'inline' and field_errors:\n field_title = widget.attrs.get('title', '')\n field_title += ' ' + ' '.join([strip_tags(e) for e in field_errors])\n widget.attrs['title'] = field_title.strip()\n # Set required attribute\n if set_required and is_widget_required_attribute(widget):\n widget.attrs['required'] = 'required'\n # Render the field\n rendered_field = field.as_widget(attrs=widget.attrs)\n # Apply the post_processor\n if after_render:\n rendered_field = after_render(rendered_field)\n # Return changed attributes to original settings\n for attr in widget_attrs:\n widget.attrs[attr] = widget_attrs[attr]\n # Wrap the rendered field in its label if necessary\n if put_inside_label:\n rendered_field = render_label(\n content='{field} {label}'.format(field=rendered_field, label=field.label),\n label_title=field.help_text\n )\n # Add any help text and/or errors\n if layout != 'inline':\n help_text_and_errors = [field_help] + field_errors\n if help_text_and_errors:\n help_html = ' '.join([h for h in help_text_and_errors if h])\n rendered_field += '<span class=help-block>{help}</span>'.format(help=help_html)\n # Wrap the rendered field\n if wrapper:\n rendered_field = wrapper.format(content=rendered_field)\n # Prepare label\n label = field.label\n if put_inside_label:\n label = None\n if layout == 'inline' or not show_label:\n label_class = add_css_class(label_class, 'sr-only')\n # Render label and field\n content = render_field_and_label(\n field=rendered_field,\n label=label,\n field_class=field_class,\n label_class=label_class,\n layout=layout,\n )\n # Return combined content, wrapped in form control\n if field.errors:\n form_group_class = add_css_class(form_group_class, 'has-error')\n elif field.form.is_bound:\n form_group_class = add_css_class(form_group_class, 'has-success')\n\n # Required and optional classes to the form group\n if field.field.required:\n form_group_class = add_css_class(form_group_class, 'required')\n else:\n form_group_class = add_css_class(form_group_class, 'optional')\n\n return render_form_group(content, form_group_class)\n\n\ndef render_label(content, label_for=None, label_class=None, label_title=''):\n \"\"\"\n Render a label with content\n \"\"\"\n attrs = {}\n if label_for:\n attrs['for'] = label_for\n if label_class:\n attrs['class'] = label_class\n if label_title:\n attrs['title'] = label_title\n return render_tag('label', attrs=attrs, content=content)\n\n\ndef render_button(content, button_type=None, icon=None):\n attrs = {'class': 'btn'}\n if button_type:\n if button_type == 'submit':\n attrs['class'] 
+= ' btn-primary'\n elif button_type != 'reset' and button_type != 'button':\n raise BootstrapError('Parameter \"button_type\" should be \"submit\", \"reset\", \"button\" or empty.')\n attrs['type'] = button_type\n icon_content = render_icon(icon) if icon else ''\n return render_tag('button', attrs=attrs, content=text_concat(icon_content, content, separator=' '))\n\n\ndef render_field_and_label(field, label, field_class='', label_class='', layout='', **kwargs):\n # Default settings for horizontal form\n if layout == 'horizontal':\n if not label_class:\n label_class = get_bootstrap_setting('horizontal_label_class')\n if not field_class:\n field_class = get_bootstrap_setting('horizontal_field_class')\n if not label:\n label = ' '\n label_class = add_css_class(label_class, 'control-label')\n html = field\n if field_class:\n html = '<div class=\"{klass}\">{html}</div>'.format(klass=field_class, html=html)\n if label:\n html = render_label(label, label_class=label_class) + html\n return html\n\n\ndef render_form_group(content, css_class=FORM_GROUP_CLASS):\n \"\"\"\n Render a Bootstrap form group\n \"\"\"\n return '<div class=\"{klass}\">{content}</div>'.format(\n klass=css_class,\n content=content,\n )\n\n\ndef is_widget_required_attribute(widget):\n \"\"\"\n Is this widget required?\n \"\"\"\n if not get_bootstrap_setting('set_required'):\n return False\n if not widget.is_required:\n return False\n if isinstance(widget, (AdminFileWidget, HiddenInput, FileInput, CheckboxSelectMultiple)):\n return False\n return True\n\n\ndef is_widget_with_placeholder(widget):\n \"\"\"\n Is this a widget that should have a placeholder?\n Only text, search, url, tel, e-mail, password, number have placeholders\n These are all derived form TextInput, except for Textarea\n \"\"\"\n return isinstance(widget, (TextInput, Textarea))\n\n\ndef list_to_class(klass):\n def fixer(html):\n mapping = [\n ('<ul', '<div'),\n ('</ul>', '</div>'),\n ('<li', '<div class=\"{klass}\"'.format(klass=klass)),\n ('</li>', '</div>'),\n ]\n for k, v in mapping:\n html = html.replace(k, v)\n return html\n\n return fixer\n\n\ndef surround_with(html_with_content):\n def wrapper(html):\n return html_with_content.format(content=html)\n\n return wrapper\n\n\ndef fix_date_select_input(html):\n div1 = '<div class=\"col-xs-4\">'\n div2 = '</div>'\n html = html.replace('<select', div1 + '<select')\n html = html.replace('</select>', '</select>' + div2)\n return '<div class=\"row bootstrap3-multi-input\">' + html + '</div>'\n\n\ndef fix_clearable_file_input(html):\n \"\"\"\n Fix a clearable file input\n TODO: This needs improvement\n\n Currently Django returns\n Currently: <a href=\"dummy.txt\">dummy.txt</a> <input id=\"file4-clear_id\" name=\"file4-clear\" type=\"checkbox\" /> <label for=\"file4-clear_id\">Clear</label><br />Change: <input id=\"id_file4\" name=\"file4\" type=\"file\" /><span class=help-block></span></div>\n\n \"\"\"\n # TODO This needs improvement\n return '<div class=\"row bootstrap3-multi-input\"><div class=\"col-xs-12\">' + html + '</div></div>'\n",
"path": "bootstrap3/forms.py"
}
] | diff --git a/bootstrap3/forms.py b/bootstrap3/forms.py
index 1f99bdec..515c2bac 100644
--- a/bootstrap3/forms.py
+++ b/bootstrap3/forms.py
@@ -172,6 +172,12 @@ def render_field(field, layout='', form_group_class=FORM_GROUP_CLASS,
elif field.form.is_bound:
form_group_class = add_css_class(form_group_class, 'has-success')
+ # Required and optional classes to the form group
+ if field.field.required:
+ form_group_class = add_css_class(form_group_class, 'required')
+ else:
+ form_group_class = add_css_class(form_group_class, 'optional')
+
return render_form_group(content, form_group_class)
| Special display of required fields
It would be nice if there were some way to render required fields differently, for example in boldface or with an asterisk.
|
fossasia__open-event-server-6046 | [
{
"content": "from celery.signals import after_task_publish\nimport logging\nimport os.path\nfrom envparse import env\n\nimport sys\nfrom flask import Flask, json, make_response\nfrom flask_celeryext import FlaskCeleryExt\nfrom app.settings import get_settings, get_setts\nfrom flask_migrate import Migrate, MigrateCommand\nfrom flask_script import Manager\nfrom flask_login import current_user\nfrom flask_jwt import JWT\nfrom datetime import timedelta\nfrom flask_cors import CORS\nfrom flask_rest_jsonapi.errors import jsonapi_errors\nfrom flask_rest_jsonapi.exceptions import JsonApiException\nfrom healthcheck import HealthCheck\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom elasticsearch_dsl.connections import connections\nfrom pytz import utc\n\nimport sqlalchemy as sa\n\nimport stripe\nfrom app.settings import get_settings\nfrom app.models import db\nfrom app.api.helpers.jwt import jwt_authenticate, jwt_identity\nfrom app.api.helpers.cache import cache\nfrom werkzeug.middleware.profiler import ProfilerMiddleware\nfrom app.views import BlueprintsManager\nfrom app.api.helpers.auth import AuthManager\nfrom app.api.helpers.scheduled_jobs import send_after_event_mail, send_event_fee_notification, \\\n send_event_fee_notification_followup, change_session_state_on_event_completion, \\\n expire_pending_tickets_after_three_days\nfrom app.models.event import Event\nfrom app.models.role_invite import RoleInvite\nfrom app.views.healthcheck import health_check_celery, health_check_db, health_check_migrations, check_migrations\nfrom app.views.elastic_search import client\nfrom app.views.elastic_cron_helpers import sync_events_elasticsearch, cron_rebuild_events_elasticsearch\nfrom app.views.redis_store import redis_store\nfrom app.views.celery_ import celery\nfrom app.templates.flask_ext.jinja.filters import init_filters\nimport sentry_sdk\nfrom sentry_sdk.integrations.flask import FlaskIntegration\n\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\nstatic_dir = os.path.dirname(os.path.dirname(__file__)) + \"/static\"\ntemplate_dir = os.path.dirname(__file__) + \"/templates\"\napp = Flask(__name__, static_folder=static_dir, template_folder=template_dir)\nenv.read_envfile()\n\n\nclass ReverseProxied(object):\n \"\"\"\n ReverseProxied flask wsgi app wrapper from http://stackoverflow.com/a/37842465/1562480 by aldel\n \"\"\"\n\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n scheme = environ.get('HTTP_X_FORWARDED_PROTO')\n if scheme:\n environ['wsgi.url_scheme'] = scheme\n if os.getenv('FORCE_SSL', 'no') == 'yes':\n environ['wsgi.url_scheme'] = 'https'\n return self.app(environ, start_response)\n\n\napp.wsgi_app = ReverseProxied(app.wsgi_app)\n\napp_created = False\n\n\ndef create_app():\n global app_created\n if not app_created:\n BlueprintsManager.register(app)\n Migrate(app, db)\n\n app.config.from_object(env('APP_CONFIG', default='config.ProductionConfig'))\n db.init_app(app)\n _manager = Manager(app)\n _manager.add_command('db', MigrateCommand)\n\n if app.config['CACHING']:\n cache.init_app(app, config={'CACHE_TYPE': 'simple'})\n else:\n cache.init_app(app, config={'CACHE_TYPE': 'null'})\n\n stripe.api_key = 'SomeStripeKey'\n app.secret_key = 'super secret key'\n app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False\n app.config['FILE_SYSTEM_STORAGE_FILE_VIEW'] = 'static'\n\n app.logger.addHandler(logging.StreamHandler(sys.stdout))\n app.logger.setLevel(logging.ERROR)\n\n # set up jwt\n app.config['JWT_AUTH_USERNAME_KEY'] = 
'email'\n app.config['JWT_EXPIRATION_DELTA'] = timedelta(seconds=24 * 60 * 60)\n app.config['JWT_AUTH_URL_RULE'] = '/auth/session'\n _jwt = JWT(app, jwt_authenticate, jwt_identity)\n\n # setup celery\n app.config['CELERY_BROKER_URL'] = app.config['REDIS_URL']\n app.config['CELERY_RESULT_BACKEND'] = app.config['CELERY_BROKER_URL']\n\n CORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\n AuthManager.init_login(app)\n\n if app.config['TESTING'] and app.config['PROFILE']:\n # Profiling\n app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])\n\n # development api\n with app.app_context():\n from app.api.admin_statistics_api.events import event_statistics\n from app.api.auth import auth_routes\n from app.api.attendees import attendee_misc_routes\n from app.api.bootstrap import api_v1\n from app.api.celery_tasks import celery_routes\n from app.api.event_copy import event_copy\n from app.api.exports import export_routes\n from app.api.imports import import_routes\n from app.api.uploads import upload_routes\n from app.api.users import user_misc_routes\n from app.api.orders import order_misc_routes\n from app.api.role_invites import role_invites_misc_routes\n from app.api.auth import ticket_blueprint, authorised_blueprint\n from app.api.admin_translations import admin_blueprint\n from app.api.orders import alipay_blueprint\n\n app.register_blueprint(api_v1)\n app.register_blueprint(event_copy)\n app.register_blueprint(upload_routes)\n app.register_blueprint(export_routes)\n app.register_blueprint(import_routes)\n app.register_blueprint(celery_routes)\n app.register_blueprint(auth_routes)\n app.register_blueprint(event_statistics)\n app.register_blueprint(user_misc_routes)\n app.register_blueprint(attendee_misc_routes)\n app.register_blueprint(order_misc_routes)\n app.register_blueprint(role_invites_misc_routes)\n app.register_blueprint(ticket_blueprint)\n app.register_blueprint(authorised_blueprint)\n app.register_blueprint(admin_blueprint)\n app.register_blueprint(alipay_blueprint)\n\n sa.orm.configure_mappers()\n\n if app.config['SERVE_STATIC']:\n app.add_url_rule('/static/<path:filename>',\n endpoint='static',\n view_func=app.send_static_file)\n\n # sentry\n if not app_created and 'SENTRY_DSN' in app.config:\n sentry_sdk.init(app.config['SENTRY_DSN'], integrations=[FlaskIntegration()])\n\n # redis\n redis_store.init_app(app)\n\n # elasticsearch\n if app.config['ENABLE_ELASTICSEARCH']:\n client.init_app(app)\n connections.add_connection('default', client.elasticsearch)\n with app.app_context():\n try:\n cron_rebuild_events_elasticsearch.delay()\n except Exception:\n pass\n\n app_created = True\n return app, _manager, db, _jwt\n\n\ncurrent_app, manager, database, jwt = create_app()\ninit_filters(app)\n\n\n# http://stackoverflow.com/questions/26724623/\[email protected]_request\ndef track_user():\n if current_user.is_authenticated:\n current_user.update_lat()\n\n\ndef make_celery(app=None):\n app = app or create_app()[0]\n celery.conf.update(app.config)\n ext = FlaskCeleryExt(app)\n return ext.celery\n\n\n# Health-check\nhealth = HealthCheck(current_app, \"/health-check\")\nhealth.add_check(health_check_celery)\nhealth.add_check(health_check_db)\nwith current_app.app_context():\n current_app.config['MIGRATION_STATUS'] = check_migrations()\nhealth.add_check(health_check_migrations)\n\n\n# http://stackoverflow.com/questions/9824172/find-out-whether-celery-task-exists\n@after_task_publish.connect\ndef update_sent_state(sender=None, headers=None, **kwargs):\n # the task may not exist if 
sent using `send_task` which\n # sends tasks by name, so fall back to the default result backend\n # if that is the case.\n task = celery.tasks.get(sender)\n backend = task.backend if task else celery.backend\n backend.store_result(headers['id'], None, 'WAITING')\n\n\n# register celery tasks. removing them will cause the tasks to not function. so don't remove them\n# it is important to register them after celery is defined to resolve circular imports\n\nfrom .api.helpers import tasks\n\n# import helpers.tasks\n\n\nscheduler = BackgroundScheduler(timezone=utc)\n# scheduler.add_job(send_mail_to_expired_orders, 'interval', hours=5)\n# scheduler.add_job(empty_trash, 'cron', hour=5, minute=30)\nif app.config['ENABLE_ELASTICSEARCH']:\n scheduler.add_job(sync_events_elasticsearch, 'interval', minutes=60)\n scheduler.add_job(cron_rebuild_events_elasticsearch, 'cron', day=7)\n\nscheduler.add_job(send_after_event_mail, 'cron', hour=5, minute=30)\nscheduler.add_job(send_event_fee_notification, 'cron', day=1)\nscheduler.add_job(send_event_fee_notification_followup, 'cron', day=15)\nscheduler.add_job(change_session_state_on_event_completion, 'cron', hour=5, minute=30)\nscheduler.add_job(expire_pending_tickets_after_three_days, 'cron', hour=5)\nscheduler.start()\n\n\[email protected](500)\ndef internal_server_error(error):\n if current_app.config['PROPOGATE_ERROR'] is True:\n exc = JsonApiException({'pointer': ''}, str(error))\n else:\n exc = JsonApiException({'pointer': ''}, 'Unknown error')\n return make_response(json.dumps(jsonapi_errors([exc.to_dict()])), exc.status,\n {'Content-Type': 'application/vnd.api+json'})\n\n\nif __name__ == '__main__':\n current_app.run()\n",
"path": "app/__init__.py"
}
] | [
{
"content": "from celery.signals import after_task_publish\nimport logging\nimport os.path\nfrom envparse import env\n\nimport sys\nfrom flask import Flask, json, make_response\nfrom flask_celeryext import FlaskCeleryExt\nfrom app.settings import get_settings, get_setts\nfrom flask_migrate import Migrate, MigrateCommand\nfrom flask_script import Manager\nfrom flask_login import current_user\nfrom flask_jwt import JWT\nfrom datetime import timedelta\nfrom flask_cors import CORS\nfrom flask_rest_jsonapi.errors import jsonapi_errors\nfrom flask_rest_jsonapi.exceptions import JsonApiException\nfrom healthcheck import HealthCheck\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom elasticsearch_dsl.connections import connections\nfrom pytz import utc\n\nimport sqlalchemy as sa\n\nimport stripe\nfrom app.settings import get_settings\nfrom app.models import db\nfrom app.api.helpers.jwt import jwt_authenticate, jwt_identity\nfrom app.api.helpers.cache import cache\nfrom werkzeug.middleware.profiler import ProfilerMiddleware\nfrom app.views import BlueprintsManager\nfrom app.api.helpers.auth import AuthManager\nfrom app.api.helpers.scheduled_jobs import send_after_event_mail, send_event_fee_notification, \\\n send_event_fee_notification_followup, change_session_state_on_event_completion, \\\n expire_pending_tickets_after_three_days\nfrom app.models.event import Event\nfrom app.models.role_invite import RoleInvite\nfrom app.views.healthcheck import health_check_celery, health_check_db, health_check_migrations, check_migrations\nfrom app.views.elastic_search import client\nfrom app.views.elastic_cron_helpers import sync_events_elasticsearch, cron_rebuild_events_elasticsearch\nfrom app.views.redis_store import redis_store\nfrom app.views.celery_ import celery\nfrom app.templates.flask_ext.jinja.filters import init_filters\nimport sentry_sdk\nfrom sentry_sdk.integrations.flask import FlaskIntegration\n\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\n\nstatic_dir = os.path.dirname(os.path.dirname(__file__)) + \"/static\"\ntemplate_dir = os.path.dirname(__file__) + \"/templates\"\napp = Flask(__name__, static_folder=static_dir, template_folder=template_dir)\nenv.read_envfile()\n\n\nclass ReverseProxied(object):\n \"\"\"\n ReverseProxied flask wsgi app wrapper from http://stackoverflow.com/a/37842465/1562480 by aldel\n \"\"\"\n\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n scheme = environ.get('HTTP_X_FORWARDED_PROTO')\n if scheme:\n environ['wsgi.url_scheme'] = scheme\n if os.getenv('FORCE_SSL', 'no') == 'yes':\n environ['wsgi.url_scheme'] = 'https'\n return self.app(environ, start_response)\n\n\napp.wsgi_app = ReverseProxied(app.wsgi_app)\n\napp_created = False\n\n\ndef create_app():\n global app_created\n if not app_created:\n BlueprintsManager.register(app)\n Migrate(app, db)\n\n app.config.from_object(env('APP_CONFIG', default='config.ProductionConfig'))\n db.init_app(app)\n _manager = Manager(app)\n _manager.add_command('db', MigrateCommand)\n\n if app.config['CACHING']:\n cache.init_app(app, config={'CACHE_TYPE': 'simple'})\n else:\n cache.init_app(app, config={'CACHE_TYPE': 'null'})\n\n stripe.api_key = 'SomeStripeKey'\n app.secret_key = 'super secret key'\n app.config['JSONIFY_PRETTYPRINT_REGULAR'] = False\n app.config['FILE_SYSTEM_STORAGE_FILE_VIEW'] = 'static'\n\n app.logger.addHandler(logging.StreamHandler(sys.stdout))\n app.logger.setLevel(logging.ERROR)\n\n # set up jwt\n app.config['JWT_AUTH_USERNAME_KEY'] = 
'email'\n app.config['JWT_EXPIRATION_DELTA'] = timedelta(seconds=24 * 60 * 60)\n app.config['JWT_AUTH_URL_RULE'] = '/auth/session'\n _jwt = JWT(app, jwt_authenticate, jwt_identity)\n\n # setup celery\n app.config['CELERY_BROKER_URL'] = app.config['REDIS_URL']\n app.config['CELERY_RESULT_BACKEND'] = app.config['CELERY_BROKER_URL']\n app.config['CELERY_ACCEPT_CONTENT'] = ['json', 'application/text']\n\n CORS(app, resources={r\"/*\": {\"origins\": \"*\"}})\n AuthManager.init_login(app)\n\n if app.config['TESTING'] and app.config['PROFILE']:\n # Profiling\n app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])\n\n # development api\n with app.app_context():\n from app.api.admin_statistics_api.events import event_statistics\n from app.api.auth import auth_routes\n from app.api.attendees import attendee_misc_routes\n from app.api.bootstrap import api_v1\n from app.api.celery_tasks import celery_routes\n from app.api.event_copy import event_copy\n from app.api.exports import export_routes\n from app.api.imports import import_routes\n from app.api.uploads import upload_routes\n from app.api.users import user_misc_routes\n from app.api.orders import order_misc_routes\n from app.api.role_invites import role_invites_misc_routes\n from app.api.auth import ticket_blueprint, authorised_blueprint\n from app.api.admin_translations import admin_blueprint\n from app.api.orders import alipay_blueprint\n\n app.register_blueprint(api_v1)\n app.register_blueprint(event_copy)\n app.register_blueprint(upload_routes)\n app.register_blueprint(export_routes)\n app.register_blueprint(import_routes)\n app.register_blueprint(celery_routes)\n app.register_blueprint(auth_routes)\n app.register_blueprint(event_statistics)\n app.register_blueprint(user_misc_routes)\n app.register_blueprint(attendee_misc_routes)\n app.register_blueprint(order_misc_routes)\n app.register_blueprint(role_invites_misc_routes)\n app.register_blueprint(ticket_blueprint)\n app.register_blueprint(authorised_blueprint)\n app.register_blueprint(admin_blueprint)\n app.register_blueprint(alipay_blueprint)\n\n sa.orm.configure_mappers()\n\n if app.config['SERVE_STATIC']:\n app.add_url_rule('/static/<path:filename>',\n endpoint='static',\n view_func=app.send_static_file)\n\n # sentry\n if not app_created and 'SENTRY_DSN' in app.config:\n sentry_sdk.init(app.config['SENTRY_DSN'], integrations=[FlaskIntegration()])\n\n # redis\n redis_store.init_app(app)\n\n # elasticsearch\n if app.config['ENABLE_ELASTICSEARCH']:\n client.init_app(app)\n connections.add_connection('default', client.elasticsearch)\n with app.app_context():\n try:\n cron_rebuild_events_elasticsearch.delay()\n except Exception:\n pass\n\n app_created = True\n return app, _manager, db, _jwt\n\n\ncurrent_app, manager, database, jwt = create_app()\ninit_filters(app)\n\n\n# http://stackoverflow.com/questions/26724623/\[email protected]_request\ndef track_user():\n if current_user.is_authenticated:\n current_user.update_lat()\n\n\ndef make_celery(app=None):\n app = app or create_app()[0]\n celery.conf.update(app.config)\n ext = FlaskCeleryExt(app)\n return ext.celery\n\n\n# Health-check\nhealth = HealthCheck(current_app, \"/health-check\")\nhealth.add_check(health_check_celery)\nhealth.add_check(health_check_db)\nwith current_app.app_context():\n current_app.config['MIGRATION_STATUS'] = check_migrations()\nhealth.add_check(health_check_migrations)\n\n\n# http://stackoverflow.com/questions/9824172/find-out-whether-celery-task-exists\n@after_task_publish.connect\ndef 
update_sent_state(sender=None, headers=None, **kwargs):\n # the task may not exist if sent using `send_task` which\n # sends tasks by name, so fall back to the default result backend\n # if that is the case.\n task = celery.tasks.get(sender)\n backend = task.backend if task else celery.backend\n backend.store_result(headers['id'], None, 'WAITING')\n\n\n# register celery tasks. removing them will cause the tasks to not function. so don't remove them\n# it is important to register them after celery is defined to resolve circular imports\n\nfrom .api.helpers import tasks\n\n# import helpers.tasks\n\n\nscheduler = BackgroundScheduler(timezone=utc)\n# scheduler.add_job(send_mail_to_expired_orders, 'interval', hours=5)\n# scheduler.add_job(empty_trash, 'cron', hour=5, minute=30)\nif app.config['ENABLE_ELASTICSEARCH']:\n scheduler.add_job(sync_events_elasticsearch, 'interval', minutes=60)\n scheduler.add_job(cron_rebuild_events_elasticsearch, 'cron', day=7)\n\nscheduler.add_job(send_after_event_mail, 'cron', hour=5, minute=30)\nscheduler.add_job(send_event_fee_notification, 'cron', day=1)\nscheduler.add_job(send_event_fee_notification_followup, 'cron', day=15)\nscheduler.add_job(change_session_state_on_event_completion, 'cron', hour=5, minute=30)\nscheduler.add_job(expire_pending_tickets_after_three_days, 'cron', hour=5)\nscheduler.start()\n\n\[email protected](500)\ndef internal_server_error(error):\n if current_app.config['PROPOGATE_ERROR'] is True:\n exc = JsonApiException({'pointer': ''}, str(error))\n else:\n exc = JsonApiException({'pointer': ''}, 'Unknown error')\n return make_response(json.dumps(jsonapi_errors([exc.to_dict()])), exc.status,\n {'Content-Type': 'application/vnd.api+json'})\n\n\nif __name__ == '__main__':\n current_app.run()\n",
"path": "app/__init__.py"
}
] | diff --git a/app/__init__.py b/app/__init__.py
index 02d300a2ef..26b0c27fb9 100644
--- a/app/__init__.py
+++ b/app/__init__.py
@@ -108,6 +108,7 @@ def create_app():
# setup celery
app.config['CELERY_BROKER_URL'] = app.config['REDIS_URL']
app.config['CELERY_RESULT_BACKEND'] = app.config['CELERY_BROKER_URL']
+ app.config['CELERY_ACCEPT_CONTENT'] = ['json', 'application/text']
CORS(app, resources={r"/*": {"origins": "*"}})
AuthManager.init_login(app)
| Error logs generated in Celery while sending emails
```
Traceback (most recent call last):
File "/Users/abhinav/Documents/OpenSource/fossassia/open-event-server/venv/lib/python3.6/site-packages/celery/worker/consumer/consumer.py", line 551, in on_task_received
payload = message.decode()
File "/Users/abhinav/Documents/OpenSource/fossassia/open-event-server/venv/lib/python3.6/site-packages/kombu/message.py", line 193, in decode
self._decoded_cache = self._decode()
File "/Users/abhinav/Documents/OpenSource/fossassia/open-event-server/venv/lib/python3.6/site-packages/kombu/message.py", line 198, in _decode
self.content_encoding, accept=self.accept)
File "/Users/abhinav/Documents/OpenSource/fossassia/open-event-server/venv/lib/python3.6/site-packages/kombu/serialization.py", line 253, in loads
raise self._for_untrusted_content(content_type, 'untrusted')
kombu.exceptions.ContentDisallowed: Refusing to deserialize untrusted content of type pickle (application/x-python-serialize)
```
Similar logs appear for the JSON format of the mail objects.
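For context, the one-line change in the diff above maps onto Celery's accept-content setting; a minimal standalone sketch of the same idea (the app name and broker URL are placeholders, and the content-type list is copied from the diff):

```py
from celery import Celery

celery = Celery("tasks", broker="redis://localhost:6379/0")

# Declare which payload content types the worker is allowed to deserialize
# (values taken from the diff above) and keep task/result payloads as JSON.
celery.conf.accept_content = ["json", "application/text"]
celery.conf.task_serializer = "json"
celery.conf.result_serializer = "json"
```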
|
translate__pootle-3380 | [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2008-2013 Zuza Software Foundation\n# Copyright 2014 Evernote Corporation\n#\n# This file is part of Pootle.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\nimport glob\nimport os\nimport re\nimport sys\n\nfrom distutils import log\nfrom distutils.command.build import build as DistutilsBuild\nfrom distutils.errors import DistutilsOptionError\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.test import test as TestCommand\n\nfrom pootle.__version__ import sver as pootle_version\n\n\ndef parse_requirements(file_name):\n \"\"\"Parses a pip requirements file and returns a list of packages.\n\n Use the result of this function in the ``install_requires`` field.\n Copied from cburgmer/pdfserver.\n \"\"\"\n requirements = []\n for line in open(file_name, 'r').read().split('\\n'):\n # Ignore comments, blank lines and included requirements files\n if re.match(r'(\\s*#)|(\\s*$)|(-r .*$)', line):\n continue\n\n if re.match(r'\\s*-e\\s+', line):\n requirements.append(re.sub(r'\\s*-e\\s+.*#egg=(.*)$', r'\\1', line))\n elif re.match(r'\\s*-f\\s+', line):\n pass\n else:\n requirements.append(line)\n\n return requirements\n\n\nclass PyTest(TestCommand):\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = ['--tb=short', 'tests/']\n self.test_suite = True\n\n def run_tests(self):\n #import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\n\nclass PootleBuildMo(DistutilsBuild):\n\n description = \"compile Gettext PO files into MO\"\n user_options = [\n ('all', None,\n \"compile all language (don't use LINGUAS file)\"),\n ('lang=', 'l',\n \"specify a language to compile\"),\n ]\n boolean_options = ['all']\n\n po_path_base = os.path.join('pootle', 'locale')\n _langs = []\n\n def initialize_options(self):\n self.all = False\n self.lang = None\n\n def finalize_options(self):\n if self.all and self.lang is not None:\n raise DistutilsOptionError(\n \"Can't use --all and --lang together\"\n )\n if self.lang is not None:\n self._langs = [self.lang]\n elif self.all:\n for lang in os.listdir(self.po_path_base):\n if (os.path.isdir(os.path.join(self.po_path_base, lang)) and\n lang != \"templates\"):\n self._langs.append(lang)\n else:\n for lang in open(os.path.join('pootle', 'locale', 'LINGUAS')):\n self._langs.append(lang.rstrip())\n\n def build_mo(self):\n \"\"\"Compile .mo files from available .po files\"\"\"\n import subprocess\n import gettext\n from translate.storage import factory\n\n for lang in self._langs:\n lang = lang.rstrip()\n\n po_path = os.path.join('pootle', 'locale', lang)\n mo_path = os.path.join('pootle', 'locale', lang, 'LC_MESSAGES')\n\n if not os.path.exists(mo_path):\n os.makedirs(mo_path)\n\n for po, mo in (('pootle.po', 'django.mo'),\n ('pootle_js.po', 'djangojs.mo')):\n po_filename = os.path.join(po_path, 
po)\n mo_filename = os.path.join(mo_path, mo)\n\n if not os.path.exists(po_filename):\n log.warn(\"%s: missing file %s\", lang, po_filename)\n continue\n\n if not os.path.exists(mo_path):\n os.makedirs(mo_path)\n\n log.info(\"compiling %s\", lang)\n try:\n subprocess.call([\n 'msgfmt', '--strict', '-o', mo_filename, po_filename],\n stderr=subprocess.STDOUT)\n except Exception as e:\n log.warn(\"%s: skipping, running msgfmt failed: %s\",\n lang, e)\n\n try:\n store = factory.getobject(po_filename)\n gettext.c2py(store.getheaderplural()[1])\n except Exception:\n log.warn(\"%s: invalid plural header in %s\",\n lang, po_filename)\n\n def run(self):\n self.build_mo()\n\n\nsetup(\n name=\"Pootle\",\n version=pootle_version,\n\n description=\"An online collaborative localization tool.\",\n long_description=open(\n os.path.join(os.path.dirname(__file__), 'README.rst')\n ).read(),\n\n author=\"Translate\",\n author_email=\"[email protected]\",\n license=\"GNU General Public License (GPL)\",\n url=\"http://pootle.translatehouse.org\",\n download_url=\"http://sourceforge.net/projects/translate/files/Pootle/\" + pootle_version,\n\n install_requires=parse_requirements('requirements/base.txt'),\n tests_require=parse_requirements('requirements/tests.txt'),\n\n platforms=[\"any\"],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Framework :: Django\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: End Users/Desktop\",\n \"Intended Audience :: Information Technology\",\n \"License :: OSI Approved :: GNU General Public License (GPL)\",\n \"Operating System :: OS Independent\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: Unix\",\n \"Programming Language :: JavaScript\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development :: Localization\",\n \"Topic :: Text Processing :: Linguistic\"\n ],\n zip_safe=False,\n packages=find_packages(exclude=['deploy*']),\n include_package_data=True,\n\n entry_points={\n 'console_scripts': [\n 'pootle = pootle.runner:main',\n ],\n },\n cmdclass={\n 'build_mo': PootleBuildMo,\n 'test': PyTest,\n },\n)\n",
"path": "setup.py"
}
] | [
{
"content": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# Copyright 2008-2013 Zuza Software Foundation\n# Copyright 2014 Evernote Corporation\n#\n# This file is part of Pootle.\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, see <http://www.gnu.org/licenses/>.\n\nimport glob\nimport os\nimport re\nimport sys\n\nfrom distutils import log\nfrom distutils.command.build import build as DistutilsBuild\nfrom distutils.errors import DistutilsOptionError\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.test import test as TestCommand\n\nfrom pootle.__version__ import sver as pootle_version\n\n\ndef parse_requirements(file_name):\n \"\"\"Parses a pip requirements file and returns a list of packages.\n\n Use the result of this function in the ``install_requires`` field.\n Copied from cburgmer/pdfserver.\n \"\"\"\n requirements = []\n for line in open(file_name, 'r').read().split('\\n'):\n # Ignore comments, blank lines and included requirements files\n if re.match(r'(\\s*#)|(\\s*$)|((-r|--allow-external|--allow-unverified) .*$)', line):\n continue\n\n if re.match(r'\\s*-e\\s+', line):\n requirements.append(re.sub(r'\\s*-e\\s+.*#egg=(.*)$', r'\\1', line))\n elif re.match(r'\\s*-f\\s+', line):\n pass\n else:\n requirements.append(line)\n\n return requirements\n\n\nclass PyTest(TestCommand):\n\n def finalize_options(self):\n TestCommand.finalize_options(self)\n self.test_args = ['--tb=short', 'tests/']\n self.test_suite = True\n\n def run_tests(self):\n #import here, cause outside the eggs aren't loaded\n import pytest\n errno = pytest.main(self.test_args)\n sys.exit(errno)\n\n\nclass PootleBuildMo(DistutilsBuild):\n\n description = \"compile Gettext PO files into MO\"\n user_options = [\n ('all', None,\n \"compile all language (don't use LINGUAS file)\"),\n ('lang=', 'l',\n \"specify a language to compile\"),\n ]\n boolean_options = ['all']\n\n po_path_base = os.path.join('pootle', 'locale')\n _langs = []\n\n def initialize_options(self):\n self.all = False\n self.lang = None\n\n def finalize_options(self):\n if self.all and self.lang is not None:\n raise DistutilsOptionError(\n \"Can't use --all and --lang together\"\n )\n if self.lang is not None:\n self._langs = [self.lang]\n elif self.all:\n for lang in os.listdir(self.po_path_base):\n if (os.path.isdir(os.path.join(self.po_path_base, lang)) and\n lang != \"templates\"):\n self._langs.append(lang)\n else:\n for lang in open(os.path.join('pootle', 'locale', 'LINGUAS')):\n self._langs.append(lang.rstrip())\n\n def build_mo(self):\n \"\"\"Compile .mo files from available .po files\"\"\"\n import subprocess\n import gettext\n from translate.storage import factory\n\n for lang in self._langs:\n lang = lang.rstrip()\n\n po_path = os.path.join('pootle', 'locale', lang)\n mo_path = os.path.join('pootle', 'locale', lang, 'LC_MESSAGES')\n\n if not os.path.exists(mo_path):\n os.makedirs(mo_path)\n\n for po, mo in (('pootle.po', 'django.mo'),\n ('pootle_js.po', 
'djangojs.mo')):\n po_filename = os.path.join(po_path, po)\n mo_filename = os.path.join(mo_path, mo)\n\n if not os.path.exists(po_filename):\n log.warn(\"%s: missing file %s\", lang, po_filename)\n continue\n\n if not os.path.exists(mo_path):\n os.makedirs(mo_path)\n\n log.info(\"compiling %s\", lang)\n try:\n subprocess.call([\n 'msgfmt', '--strict', '-o', mo_filename, po_filename],\n stderr=subprocess.STDOUT)\n except Exception as e:\n log.warn(\"%s: skipping, running msgfmt failed: %s\",\n lang, e)\n\n try:\n store = factory.getobject(po_filename)\n gettext.c2py(store.getheaderplural()[1])\n except Exception:\n log.warn(\"%s: invalid plural header in %s\",\n lang, po_filename)\n\n def run(self):\n self.build_mo()\n\n\nsetup(\n name=\"Pootle\",\n version=pootle_version,\n\n description=\"An online collaborative localization tool.\",\n long_description=open(\n os.path.join(os.path.dirname(__file__), 'README.rst')\n ).read(),\n\n author=\"Translate\",\n author_email=\"[email protected]\",\n license=\"GNU General Public License (GPL)\",\n url=\"http://pootle.translatehouse.org\",\n download_url=\"http://sourceforge.net/projects/translate/files/Pootle/\" + pootle_version,\n\n install_requires=parse_requirements('requirements/base.txt'),\n tests_require=parse_requirements('requirements/tests.txt'),\n\n platforms=[\"any\"],\n classifiers=[\n \"Development Status :: 5 - Production/Stable\",\n \"Environment :: Web Environment\",\n \"Framework :: Django\",\n \"Intended Audience :: Developers\",\n \"Intended Audience :: End Users/Desktop\",\n \"Intended Audience :: Information Technology\",\n \"License :: OSI Approved :: GNU General Public License (GPL)\",\n \"Operating System :: OS Independent\",\n \"Operating System :: Microsoft :: Windows\",\n \"Operating System :: Unix\",\n \"Programming Language :: JavaScript\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development :: Localization\",\n \"Topic :: Text Processing :: Linguistic\"\n ],\n zip_safe=False,\n packages=find_packages(exclude=['deploy*']),\n include_package_data=True,\n\n entry_points={\n 'console_scripts': [\n 'pootle = pootle.runner:main',\n ],\n },\n cmdclass={\n 'build_mo': PootleBuildMo,\n 'test': PyTest,\n },\n)\n",
"path": "setup.py"
}
] | diff --git a/.travis.yml b/.travis.yml
index 105e076d692..baea2136dac 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,24 +4,71 @@ python:
- 2.6
- 2.7
env:
- - DJANGO_VERSION=1.4.5
-# When we do finally support Django 1.5 - it requires Python 2.6.5, so exclude
-# earlier versions (see https://docs.djangoproject.com/en/dev/faq/install/).
-#matrix:
-# exclude:
-# - python: 2.5
-# env: DJANGO_VERSION=1.5
+ matrix:
+ - DJANGO_VERSION=1.6.5 DATABASE_BACKEND=sqlite
+ - DJANGO_VERSION=1.6.5 DATABASE_BACKEND=postgres
+ - DJANGO_VERSION=1.6.5 DATABASE_BACKEND=mysql
+ - DJANGO_VERSION=1.7.1 DATABASE_BACKEND=sqlite
+ - DJANGO_VERSION=1.7.1 DATABASE_BACKEND=postgres
+ - DJANGO_VERSION=1.7.1 DATABASE_BACKEND=mysql
+matrix:
+ exclude:
+ - python: 2.6
+ env: DJANGO_VERSION=1.7.1 DATABASE_BACKEND=sqlite
+ - python: 2.6
+ env: DJANGO_VERSION=1.7.1 DATABASE_BACKEND=mysql
+ - python: 2.6
+ env: DJANGO_VERSION=1.7.1 DATABASE_BACKEND=postgres
+ allow_failures:
+ # Allow failure on Django 1.7.1 until we've migrated away from South to
+ # Django migration
+ - env: DJANGO_VERSION=1.7.1 DATABASE_BACKEND=sqlite
+ - env: DJANGO_VERSION=1.7.1 DATABASE_BACKEND=mysql
+ - env: DJANGO_VERSION=1.7.1 DATABASE_BACKEND=postgres
+ # Disable alternate databases until we break away from being stuck on
+ # MyISAM
+ - env: DJANGO_VERSION=1.6.5 DATABASE_BACKEND=sqlite
+ - env: DJANGO_VERSION=1.6.5 DATABASE_BACKEND=postgres
+cache:
+ - apt
+before_install:
+ - travis_retry sudo apt-get install -qq bc
install:
- - pip install --use-mirrors Django==$DJANGO_VERSION --timeout=240
- - pip install -r requirements/build.txt --use-mirrors --timeout=240
- - python -V; pip freeze # Print all installed versions for reference.
+ - travis_retry pip install -r requirements/travis.txt --timeout=240
+ - travis_retry pip install "Django>=$DJANGO_VERSION,<"$(echo $(echo $DJANGO_VERSION | cut -d"." -f1,2) + 0.1 | bc) --timeout=240
+ - pip freeze # Print all installed versions for reference.
+before_script:
+ - if [[ $DATABASE_BACKEND == 'postgres' ]]; then psql -c 'create database pootle;' -U postgres; fi
+ - if [[ $DATABASE_BACKEND == 'mysql' ]]; then mysql -e 'create database pootle;'; fi
script:
- - make build SHELL=/bin/bash TAIL='|tail -40; exit $$PIPESTATUS'
- - ./manage.py runserver &
- - TESTPID=$!
- - sleep 100
- - kill -2 $TESTPID
+ - python -m compileall -q -f .
+ # FIXME revert breakdown of 'make build' after core merge
+ #- make docs
+ #- make build SHELL=/bin/bash TAIL='|tail -40; exit $$PIPESTATUS'
+ # FIXME Don't raise Warning as Errors till docs have landed and are cleaned
+ # up See https://github.com/translate/pootle/issues/3307
+ - (cd docs; make SPHINXOPTS="-q" html)
+ - make assets
+ - make mo
+ - coverage run --parallel-mode manage.py syncdb --noinput --traceback
+ # FIXME disable the migrate, initdb, runserver..kill block until merge or
+ # #3378 and #3379 are fixed
+ #- coverage run --parallel-mode manage.py migrate --traceback
+ #- coverage run --parallel-mode manage.py initdb --traceback
+ #- coverage run --parallel-mode ./manage.py runserver --traceback &
+ #- TESTPID=$!
+ #- sleep 20
+ #- kill -2 $TESTPID
+ # FIXME reenable migration testing
+ #- tests/version-migration/migrate.sh
+ - make pep8
+ - py.test
+after_success:
+ - coverage combine
+ - coveralls
notifications:
email:
on_failure: always
on_success: change
+services:
+ - redis-server
diff --git a/Makefile b/Makefile
index d37a4218fd2..3f959c9ecb0 100644
--- a/Makefile
+++ b/Makefile
@@ -13,7 +13,7 @@ SFUSERNAME=$(shell egrep -A5 sourceforge ~/.ssh/config | egrep -m1 User | cut -d
FORMATS=--formats=bztar
TEST_ENV_NAME = pootle_test_env
-.PHONY: all build clean sprite test pot mo mo-all help docs assets
+.PHONY: all build clean sprite test pot mo mo-all help docs assets pep8
all: help
@@ -60,6 +60,9 @@ mo:
mo-all:
python setup.py build_mo --all
+pep8:
+ @./pootle/tools/pep8.sh travis
+
publish-pypi:
python setup.py sdist ${FORMATS} upload
@@ -85,6 +88,7 @@ help:
@echo " sprite - create CSS sprite"
@echo " clean - remove any temporal files"
@echo " test - run test suite"
+ @echo " pep8 - run pep8 checks"
@echo " pot - update the POT translations templates"
@echo " mo - build MO files for languages listed in 'pootle/locale/LINGUAS'"
@echo " mo-all - build MO files for all languages (only use for testing)"
diff --git a/pootle/log/README b/pootle/log/README
new file mode 100644
index 00000000000..ddd242e88ba
--- /dev/null
+++ b/pootle/log/README
@@ -0,0 +1 @@
+This directory (logs) contains the Pootle activity logs.
diff --git a/pootle/settings/91-travis.conf b/pootle/settings/91-travis.conf
new file mode 100644
index 00000000000..f8f265b6187
--- /dev/null
+++ b/pootle/settings/91-travis.conf
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+
+"""
+Pootle configuration for Travis
+https://travis-ci.org
+"""
+
+import os
+
+
+if os.environ.get("TRAVIS"):
+ DEBUG = True
+ TEMPLATE_DEBUG = DEBUG
+
+ DATABASE_BACKEND = os.environ.get("DATABASE_BACKEND")
+ DATABASES = {
+ 'default': {
+ 'ENGINE': 'django.db.backends.sqlite3',
+ 'NAME': working_path('dbs/pootle_travis.db'),
+ 'USER': '',
+ 'PASSWORD': '',
+ 'HOST': '',
+ 'PORT': '',
+ 'ATOMIC_REQUESTS': True,
+ }
+ }
+
+ CACHES = {
+ 'default': {
+ 'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
+ }
+ }
+
+
+ if DATABASE_BACKEND == "postgres":
+ DATABASES['default']['ENGINE'] = 'django.db.backends.postgresql_psycopg2'
+ DATABASES['default']['NAME'] = 'pootle'
+ DATABASES['default']['USER'] = 'postgres'
+ elif DATABASE_BACKEND == "mysql":
+ DATABASES['default']['ENGINE'] = 'django.db.backends.mysql'
+ DATABASES['default']['NAME'] = 'pootle'
+ DATABASES['default']['USER'] = 'travis'
+ # Remove this once we've closed #3363 and #3364
+ DATABASES['default']['OPTIONS'] = {
+ 'init_command': 'SET storage_engine=MyISAM',
+ }
diff --git a/requirements/base.txt b/requirements/base.txt
index da54c9071ec..73d2f477d0b 100644
--- a/requirements/base.txt
+++ b/requirements/base.txt
@@ -9,12 +9,19 @@ django-redis-cache
django-rq
# Required by webassets, isn't shipped in Python < 2.7
+# FIXME remove once Django >= 1.7 is required
argparse
+# Needed for Python < 2.7
+importlib
# Libraries
cssmin
diff-match-patch>=20121119
lxml>=2.2.0
+# FIXME drop once https://github.com/translate/pootle/issues/3343 and Evernote
+# allauth backend have landed.
+--allow-external pyDes
+--allow-unverified pyDes
pyDes
# Translate Toolkit
diff --git a/requirements/tests.txt b/requirements/tests.txt
index 96b586ece1c..6d1ade01559 100644
--- a/requirements/tests.txt
+++ b/requirements/tests.txt
@@ -1,5 +1,3 @@
--r deploy.txt
-
factory_boy
pytest
-pytest-django>=2.6.1
+pytest-django>=2.6.1,<2.7
diff --git a/requirements/travis.txt b/requirements/travis.txt
new file mode 100644
index 00000000000..a9e60ff03cb
--- /dev/null
+++ b/requirements/travis.txt
@@ -0,0 +1,7 @@
+-r build.txt
+-r tests.txt
+
+coveralls
+MySQL-python
+psycopg2
+pep8
diff --git a/setup.py b/setup.py
index 6f5b2df060d..a90be31ee96 100755
--- a/setup.py
+++ b/setup.py
@@ -43,7 +43,7 @@ def parse_requirements(file_name):
requirements = []
for line in open(file_name, 'r').read().split('\n'):
# Ignore comments, blank lines and included requirements files
- if re.match(r'(\s*#)|(\s*$)|(-r .*$)', line):
+ if re.match(r'(\s*#)|(\s*$)|((-r|--allow-external|--allow-unverified) .*$)', line):
continue
if re.match(r'\s*-e\s+', line):
| Core: drop MySQL dependence on MyISAM
Core depends on MyISAM at the moment because of low-level features used for changeid tracking. We need to migrate that to a more general approach that works on InnoDB and the other supported DB engines; a minimal sketch of one such approach follows the checklist below.
- [x] Make resources list work in all DB backends (#3539)
- [x] Switch revision counter to Redis (#3364)
- [x] Ensure tests run on InnoDB (#3777)
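
One way to read the revision-counter task above (#3364) is to keep the counter in Redis so it no longer depends on storage-engine behaviour at all. The snippet below is a minimal sketch of that idea, assuming a redis-py client and a hypothetical key name `pootle:revision`; it is not Pootle's actual implementation.

```python
# Illustrative only: an atomic, engine-agnostic revision counter kept in Redis
# instead of relying on MyISAM-specific behaviour. The connection settings and
# the key name "pootle:revision" are assumptions made for this sketch.
import redis

client = redis.StrictRedis(host="localhost", port=6379, db=0)

REVISION_KEY = "pootle:revision"  # hypothetical key name


def bump_revision():
    """Atomically increment and return the global revision number."""
    return client.incr(REVISION_KEY)


def current_revision():
    """Return the current revision, defaulting to 0 if it is unset."""
    value = client.get(REVISION_KEY)
    return int(value) if value is not None else 0
```

Because `INCR` is atomic on the Redis side, this works identically whether the relational backend is InnoDB, PostgreSQL, or SQLite.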
|
CiviWiki__OpenCiviWiki-1375 | [
{
"content": "\"\"\"\nDjango settings for civiwiki project.\nDarius Calliet May 12, 2016\n\nProduction settings file to select proper environment variables.\n\"\"\"\nimport os\n\n# False if not in os.environ\nDEBUG = os.getenv(\"DEBUG\", False)\n\n# defaults to second value if not found in os.environ\nDJANGO_HOST = os.getenv(\"DJANGO_HOST\", \"LOCALHOST\")\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nSECRET_KEY = os.getenv(\"DJANGO_SECRET_KEY\", \"TEST_KEY_FOR_DEVELOPMENT\")\nALLOWED_HOSTS = [\".herokuapp.com\", \".civiwiki.org\", \"127.0.0.1\", \"localhost\", \"0.0.0.0\"]\n\nINSTALLED_APPS = (\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django_extensions\",\n \"storages\",\n \"core\",\n \"rest_framework\",\n \"accounts.apps.AccountsConfig\",\n \"threads\",\n \"notifications\",\n \"corsheaders\",\n \"taggit\",\n \"categories\",\n \"notification\",\n \"debug_toolbar\",\n)\n\nMIDDLEWARE = [\n \"debug_toolbar.middleware.DebugToolbarMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n # 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nINTERNAL_IPS = [\n \"127.0.0.1\",\n]\n\nCSRF_USE_SESSIONS = (\n True # Store the CSRF token in the users session instead of in a cookie\n)\n\nCORS_ORIGIN_ALLOW_ALL = True\nROOT_URLCONF = \"core.urls\"\n\n# SSL Setup\nif DJANGO_HOST != \"LOCALHOST\":\n SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n SECURE_SSL_REDIRECT = True\n SESSION_COOKIE_SECURE = True\n CSRF_COOKIE_SECURE = True\n\n# Internationalization & Localization\nLANGUAGE_CODE = \"en-us\"\nTIME_ZONE = \"UTC\"\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\n os.path.join(BASE_DIR, \"threads/templates/threads\"),\n os.path.join(BASE_DIR, \"accounts/templates/accounts\"),\n ], # TODO: Add non-webapp template directory\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"core.wsgi.application\"\n\n# Apex Contact for Production Errors\nADMINS = [(\"Development Team\", \"[email protected]\")]\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, \"core/templates/static\"),)\nSTATIC_ROOT = os.path.join(BASE_DIR, \"staticfiles\")\n\nMEDIA_ROOT = os.path.join(BASE_DIR, \"media\")\nMEDIA_URL = \"/media/\"\n\n# TODO: re-organize and simplify staticfiles settings\nif \"CIVIWIKI_LOCAL_NAME\" not in os.environ:\n STATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\n# Use DATABASE_URL in production\nDATABASE_URL = os.getenv(\"DATABASE_URL\")\n\nif DATABASE_URL is not None:\n DATABASES = {\"default\": DATABASE_URL}\nelse:\n # Default to sqlite for 
simplicity in development\n DATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.sqlite3\",\n \"NAME\": BASE_DIR + \"/\" + \"db.sqlite3\",\n }\n }\n\n# Email Backend Setup\nif \"EMAIL_HOST\" not in os.environ:\n EMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\n EMAIL_HOST_USER = \"[email protected]\"\nelse:\n EMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\n EMAIL_HOST = os.getenv(\"EMAIL_HOST\")\n EMAIL_PORT = os.getenv(\"EMAIL_PORT\")\n EMAIL_HOST_USER = os.getenv(\"EMAIL_HOST_USER\")\n EMAIL_HOST_PASSWORD = os.getenv(\"EMAIL_HOST_PASSWORD\")\n EMAIL_USE_SSL = True\n DEFAULT_FROM_EMAIL = EMAIL_HOST\n\n# Notification API Settings\nNOTIFICATIONS_SOFT_DELETE = True\nNOTIFICATIONS_USE_JSONFIELD = True\n\n# Django REST API Settings\nDEFAULT_RENDERER_CLASSES = (\"rest_framework.renderers.JSONRenderer\",)\n\nif DEBUG:\n # Browsable HTML - Enabled only in Debug mode (dev)\n DEFAULT_RENDERER_CLASSES = DEFAULT_RENDERER_CLASSES + (\n \"rest_framework.renderers.BrowsableAPIRenderer\",\n )\n\nREST_FRAMEWORK = {\n \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.IsAuthenticated\",),\n \"DEFAULT_RENDERER_CLASSES\": DEFAULT_RENDERER_CLASSES,\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n ),\n}\n\n# CORS Settings\nCORS_ORIGIN_ALLOW_ALL = True\n\n# Custom User model\nAUTH_USER_MODEL = \"accounts.User\"\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Login Logout URLS\nLOGIN_URL = \"login/\"\nLOGIN_REDIRECT_URL = \"/\"\nLOGOUT_REDIRECT_URL = \"/\"\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\", # noqa: E501\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n \"OPTIONS\": {\n \"min_length\": 4,\n },\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\"rich\": {\"datefmt\": \"[%X]\"}},\n \"handlers\": {\n \"console\": {\n \"class\": \"rich.logging.RichHandler\",\n \"formatter\": \"rich\",\n \"level\": \"WARNING\",\n # \"filters\": [\"require_debug_true\"],\n \"rich_tracebacks\": True,\n \"tracebacks_show_locals\": True,\n }\n },\n \"loggers\": {\"django\": {\"handlers\": [\"console\"]}},\n}\n",
"path": "project/core/settings.py"
}
] | [
{
"content": "\"\"\"\nDjango settings for civiwiki project.\nDarius Calliet May 12, 2016\n\nProduction settings file to select proper environment variables.\n\"\"\"\nimport os\n\n# False if not in os.environ\nDEBUG = os.getenv(\"DEBUG\", False)\n\n# defaults to second value if not found in os.environ\nDJANGO_HOST = os.getenv(\"DJANGO_HOST\", \"LOCALHOST\")\n\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\nSECRET_KEY = os.getenv(\"DJANGO_SECRET_KEY\", \"TEST_KEY_FOR_DEVELOPMENT\")\nALLOWED_HOSTS = [\".herokuapp.com\", \".civiwiki.org\", \"127.0.0.1\", \"localhost\", \"0.0.0.0\"]\n\nINSTALLED_APPS = (\n \"django.contrib.admin\",\n \"django.contrib.auth\",\n \"django.contrib.contenttypes\",\n \"django.contrib.sessions\",\n \"django.contrib.messages\",\n \"django.contrib.staticfiles\",\n \"django_extensions\",\n \"storages\",\n \"core\",\n \"rest_framework\",\n \"accounts.apps.AccountsConfig\",\n \"threads\",\n \"notifications\",\n \"corsheaders\",\n \"taggit\",\n \"categories\",\n \"notification\",\n \"debug_toolbar\",\n)\n\nMIDDLEWARE = [\n \"debug_toolbar.middleware.DebugToolbarMiddleware\",\n \"corsheaders.middleware.CorsMiddleware\",\n \"django.middleware.security.SecurityMiddleware\",\n \"whitenoise.middleware.WhiteNoiseMiddleware\",\n \"django.contrib.sessions.middleware.SessionMiddleware\",\n \"django.middleware.common.CommonMiddleware\",\n \"django.middleware.csrf.CsrfViewMiddleware\",\n \"django.contrib.auth.middleware.AuthenticationMiddleware\",\n # 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',\n \"django.contrib.messages.middleware.MessageMiddleware\",\n \"django.middleware.clickjacking.XFrameOptionsMiddleware\",\n]\n\nINTERNAL_IPS = [\n \"127.0.0.1\",\n]\n\nCSRF_USE_SESSIONS = (\n True # Store the CSRF token in the users session instead of in a cookie\n)\n\nCORS_ORIGIN_ALLOW_ALL = True\nROOT_URLCONF = \"core.urls\"\n\n# SSL Setup\nif DJANGO_HOST != \"LOCALHOST\":\n SECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\n SECURE_SSL_REDIRECT = True\n SESSION_COOKIE_SECURE = True\n CSRF_COOKIE_SECURE = True\n\n# Internationalization & Localization\nLANGUAGE_CODE = \"en-us\"\nTIME_ZONE = \"UTC\"\nUSE_I18N = True\nUSE_L10N = True\nUSE_TZ = True\n\nTEMPLATES = [\n {\n \"BACKEND\": \"django.template.backends.django.DjangoTemplates\",\n \"DIRS\": [\n os.path.join(BASE_DIR, \"threads/templates/threads\"),\n os.path.join(BASE_DIR, \"accounts/templates/accounts\"),\n ], # TODO: Add non-webapp template directory\n \"APP_DIRS\": True,\n \"OPTIONS\": {\n \"context_processors\": [\n \"django.template.context_processors.debug\",\n \"django.template.context_processors.request\",\n \"django.contrib.auth.context_processors.auth\",\n \"django.contrib.messages.context_processors.messages\",\n ],\n },\n },\n]\n\nWSGI_APPLICATION = \"core.wsgi.application\"\n\n# Apex Contact for Production Errors\nADMINS = [(\"Development Team\", \"[email protected]\")]\n\nSTATIC_URL = \"/static/\"\nSTATICFILES_DIRS = (os.path.join(BASE_DIR, \"core/templates/static\"),)\nSTATIC_ROOT = os.path.join(BASE_DIR, \"staticfiles\")\n\nMEDIA_ROOT = os.path.join(BASE_DIR, \"media\")\nMEDIA_URL = \"/media/\"\n\nSTATICFILES_STORAGE = \"whitenoise.storage.CompressedManifestStaticFilesStorage\"\n\n# Use DATABASE_URL in production\nDATABASE_URL = os.getenv(\"DATABASE_URL\")\n\nif DATABASE_URL is not None:\n DATABASES = {\"default\": DATABASE_URL}\nelse:\n # Default to sqlite for simplicity in development\n DATABASES = {\n \"default\": {\n \"ENGINE\": 
\"django.db.backends.sqlite3\",\n \"NAME\": BASE_DIR + \"/\" + \"db.sqlite3\",\n }\n }\n\n# Email Backend Setup\nif \"EMAIL_HOST\" not in os.environ:\n EMAIL_BACKEND = \"django.core.mail.backends.console.EmailBackend\"\n EMAIL_HOST_USER = \"[email protected]\"\nelse:\n EMAIL_BACKEND = \"django.core.mail.backends.smtp.EmailBackend\"\n EMAIL_HOST = os.getenv(\"EMAIL_HOST\")\n EMAIL_PORT = os.getenv(\"EMAIL_PORT\")\n EMAIL_HOST_USER = os.getenv(\"EMAIL_HOST_USER\")\n EMAIL_HOST_PASSWORD = os.getenv(\"EMAIL_HOST_PASSWORD\")\n EMAIL_USE_SSL = True\n DEFAULT_FROM_EMAIL = EMAIL_HOST\n\n# Notification API Settings\nNOTIFICATIONS_SOFT_DELETE = True\nNOTIFICATIONS_USE_JSONFIELD = True\n\n# Django REST API Settings\nDEFAULT_RENDERER_CLASSES = (\"rest_framework.renderers.JSONRenderer\",)\n\nif DEBUG:\n # Browsable HTML - Enabled only in Debug mode (dev)\n DEFAULT_RENDERER_CLASSES = DEFAULT_RENDERER_CLASSES + (\n \"rest_framework.renderers.BrowsableAPIRenderer\",\n )\n\nREST_FRAMEWORK = {\n \"DEFAULT_PERMISSION_CLASSES\": (\"rest_framework.permissions.IsAuthenticated\",),\n \"DEFAULT_RENDERER_CLASSES\": DEFAULT_RENDERER_CLASSES,\n \"DEFAULT_AUTHENTICATION_CLASSES\": (\n \"rest_framework.authentication.BasicAuthentication\",\n \"rest_framework.authentication.SessionAuthentication\",\n ),\n}\n\n# CORS Settings\nCORS_ORIGIN_ALLOW_ALL = True\n\n# Custom User model\nAUTH_USER_MODEL = \"accounts.User\"\n\nDEFAULT_AUTO_FIELD = \"django.db.models.AutoField\"\n\n# Login Logout URLS\nLOGIN_URL = \"login/\"\nLOGIN_REDIRECT_URL = \"/\"\nLOGOUT_REDIRECT_URL = \"/\"\n\nAUTH_PASSWORD_VALIDATORS = [\n {\n \"NAME\": \"django.contrib.auth.password_validation.UserAttributeSimilarityValidator\", # noqa: E501\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.MinimumLengthValidator\",\n \"OPTIONS\": {\n \"min_length\": 4,\n },\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.CommonPasswordValidator\",\n },\n {\n \"NAME\": \"django.contrib.auth.password_validation.NumericPasswordValidator\",\n },\n]\n\nLOGGING = {\n \"version\": 1,\n \"disable_existing_loggers\": False,\n \"formatters\": {\"rich\": {\"datefmt\": \"[%X]\"}},\n \"handlers\": {\n \"console\": {\n \"class\": \"rich.logging.RichHandler\",\n \"formatter\": \"rich\",\n \"level\": \"WARNING\",\n # \"filters\": [\"require_debug_true\"],\n \"rich_tracebacks\": True,\n \"tracebacks_show_locals\": True,\n }\n },\n \"loggers\": {\"django\": {\"handlers\": [\"console\"]}},\n}\n",
"path": "project/core/settings.py"
}
] | diff --git a/.env b/.env
new file mode 100644
index 000000000..0af7c0bbc
--- /dev/null
+++ b/.env
@@ -0,0 +1,5 @@
+# Note: these environment variables are only for local development.
+# Do not use them in production.
+DJANGO_SECRET_KEY=123-secret-for-development
+
+DEBUG=True
diff --git a/.env_sample b/.env_sample
deleted file mode 100644
index b59b35a77..000000000
--- a/.env_sample
+++ /dev/null
@@ -1,15 +0,0 @@
-DJANGO_SECRET_KEY=<django_secret_key>
-
-POSTGRES_HOST=db
-POSTGRES_PORT=5432
-POSTGRES_DB=civiwiki_db
-POSTGRES_USER=civiwiki
-POSTGRES_PASSWORD=civiwiki
-
-
-CIVIWIKI_LOCAL_DB_HOST=db
-CIVIWIKI_LOCAL_NAME=civiwiki_db
-CIVIWIKI_LOCAL_USERNAME=civiwiki
-CIVIWIKI_LOCAL_PASSWORD=civiwiki
-
-DEBUG=True
diff --git a/.gitignore b/.gitignore
index 0f691f14a..c4e862ae8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,7 +6,8 @@ VirtualEnv
/project/media/*
/project/staticfiles/*
.idea
-.env
+# Note: we use .env for local development
+#.env
data/
### JetBrains template
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 5a4740cf0..8c2e8bc4f 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -2,11 +2,11 @@
Thank you for your interest in contributing to CiviWiki. There are many ways to contribute, such as by sharing ideas, design, testing, community building, and development.
-The following sections outline some common ways to contribue ideas, feature requests, and code.
+The following sections outline common ways to contribute ideas, feature requests, and code.
## Creating issues
-Make sure to search beforehand to see if the issue has been previously reported.
+Make sure to search beforehand to see if the issue has been reported.
The issue tracker should not be used for personal support requests. Please direct those to our [live chat](https://riot.im/app/#/room/#CiviWiki:matrix.org)
@@ -26,27 +26,54 @@ A good example should contain:
5. (Optional)Potential solutions to the problem.
-A good bug report will help direct developers to solve the problem at hand without wasting time trying to figure out the problem in the first place.
+A good bug report will help developers solve the problem without wasting time trying to figure out the situation in the first place.
### Feature requests/enhancements
-If you have a budding idea or a feature that requires a more community-involved discussion, consider having the development discussion on the [live chat](https://riot.im/app/#/room/#CiviWiki:matrix.org) or create a thread on [loomio](https://www.loomio.org/g/ET40tXUC/openciviwiki). This will allow for a well-thought-out issue that will more likely be in line with the goal of the project.
+If you have a budding idea or a feature that requires a more community-involved discussion, consider having the development discussion on the [live chat](https://riot.im/app/#/room/#CiviWiki:matrix.org) or create a thread on [loomio](https://www.loomio.org/g/ET40tXUC/openciviwiki). This will allow for a well-thought-out issue that will more likely align with the project's goal.
## Development
-The following sections describe how to set up a development environment. Note, we have tried to simplify our development set-up in order to reduce barriers to entry.
+The following sections describe how to set up a development environment. Note we have tried to simplify our development setup to reduce barriers to entry.
-### Install requirements
+### First, claim an issue
+
+:warning: When contributing code, please make sure you claim any related issue before opening a pull request. You can claim an issue by adding a comment like "I'd like to work on this issue." Then, we will be able to assign you to the issue so other developers know which tasks have been claimed.
+
+### Install Poetry with .env support
We now use Poetry for Python package management. Please follow the [Poetry installation instructions](https://python-poetry.org/docs/#installation) before trying the following steps.
-To install all required modules, complete the following steps.
+Once Poetry is installed, enable Poetry `.env` support by running the following command. This will set all variables defined in the `.env` file when activating the virtual environment.
+
+```sh
+poetry self add poetry-dotenv-plugin
+```
+
+### Install requirements
+
+Once Poetry is installed, complete the following steps to install all required modules.
1. Make sure you are in the repository root directory
2. Initialize the virtual environment with Poetry: `poetry install`
-3. Change into project directory
-Now that you are in the project directory, you can continue the following sections of this guide.
+### Activate virtual environment
+
+To develop the project, activate the virtual environment with the following command.
+
+```sh
+poetry shell
+```
+
+### Change into the project directory
+
+Once you have installed the project dependencies and activated the virtual environment, change into the project directory.
+
+```sh
+cd project/
+```
+
+Once you are in the project directory, you can continue the following sections of this guide.
### Install pre-commit
@@ -66,15 +93,15 @@ python manage.py migrate
### Collect static files
-Certain resources, such as CSS and JavaScript files, need to be served from a static directory. Run the following command to collect static files for Django to serve:
+Resources, such as CSS and JavaScript files, need to be served from a static directory. Run the following command to collect static files for Django to serve:
```py
python manage.py collectstatic
```
-### Create super user
+### Create a super user
-You will need a super user in order to log in and manage CiviWiki:
+You will need a super user to log in and manage CiviWiki:
```py
python manage.py createsuperuser
@@ -82,7 +109,7 @@ python manage.py createsuperuser
### Populate initial data
-During the first setup, it's useful to import hardcoded initial entries. In this case, there are two fixtures:
+During the first setup, it's helpful to import hardcoded initial entries. In this case, there are two fixtures:
* Sample threads, located in `project/data/sample_threads.json`
* Sample categories, located in `project/data/categories.json`
@@ -110,7 +137,7 @@ python manage.py runserver
### Run unit tests
-Execute unit tests by running the following command from within the `project` directory.
+Execute unit tests by running the following command within the `project` directory.
```sh
python manage.py test
@@ -118,7 +145,7 @@ python manage.py test
### Register initial user (optional)
-Once CiviWiki is running, visit the front page (probably something like http://localhost:8000). Once there, click 'log in/register', and then 'register new user'.
+Once CiviWiki runs, visit the front page (probably something like http://localhost:8000). Once there, click "login/register" and then "register new user."
## Deployment
@@ -126,21 +153,21 @@ The [deployment instructions for Heroku](https://github.com/CiviWiki/OpenCiviWik
## Coding Conventions
-We strive to follow Django Coding Conventions. See https://docs.djangoproject.com/en/dev/internals/contributing/writing-code/coding-style/
+We strive to follow [Django Coding Conventions](https://docs.djangoproject.com/en/dev/internals/contributing/writing-code/coding-style/).
## Compatible Versioning
-We use Compatibile Versioning in this project.
+We use Compatible Versioning in this project.
Given a version number MAJOR.MINOR, increment the:
-MAJOR version when you make backwards-incompatible updates of any kind
-MINOR version when you make 100% backwards-compatible updates
-Additional labels for pre-release and build metadata are available as extensions to the MAJOR.MINOR format.
+MAJOR version when you make backward-incompatible updates of any kind
+MINOR version when you make 100% backward-compatible updates
+Additional pre-release and build metadata labels are available as MAJOR extensions.MINOR format.
-### How is this different to SemVer?
+### How is this different from SemVer?
-Compatible Versioning ("ComVer") is SemVer where every PATCH number is 0 (zero). This way, ComVer is backwards compatible with SemVer.
+Compatible Versioning ("ComVer") is SemVer, where every PATCH number is 0 (zero). This way, ComVer is backward compatible with SemVer.
A ComVer release from 3.6 to 3.7 is just a SemVer release from 3.6.0 to 3.7.0. In other words, ComVer is safe to adopt since it is basically SemVer without ever issuing PATCH releases.
diff --git a/project/core/settings.py b/project/core/settings.py
index a9af92eef..41147f7fd 100644
--- a/project/core/settings.py
+++ b/project/core/settings.py
@@ -107,9 +107,7 @@
MEDIA_ROOT = os.path.join(BASE_DIR, "media")
MEDIA_URL = "/media/"
-# TODO: re-organize and simplify staticfiles settings
-if "CIVIWIKI_LOCAL_NAME" not in os.environ:
- STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
+STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage"
# Use DATABASE_URL in production
DATABASE_URL = os.getenv("DATABASE_URL")
| Update grammar in contributing guide
### Idea summary
Improve the grammar in our contributing guide with an automated grammar checker.
### Further details
_No response_
|
opsdroid__opsdroid-1363 | [
{
"content": "\"\"\"A connector for Slack.\"\"\"\nimport logging\nimport re\nimport os\nimport ssl\nimport certifi\nimport json\n\nimport aiohttp\n\nimport slack\nfrom emoji import demojize\nfrom voluptuous import Required\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message, Reaction\nfrom opsdroid.connector.slack.events import (\n Blocks,\n BlockActions,\n MessageAction,\n ViewSubmission,\n ViewClosed,\n)\n\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {\n Required(\"token\"): str,\n \"bot-name\": str,\n \"default-room\": str,\n \"icon-emoji\": str,\n \"connect-timeout\": int,\n \"chat-as-user\": bool,\n}\n\n\nclass ConnectorSlack(Connector):\n \"\"\"A connector for Slack.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\"\"\"\n super().__init__(config, opsdroid=opsdroid)\n _LOGGER.debug(_(\"Starting Slack connector.\"))\n self.name = \"slack\"\n self.default_target = config.get(\"default-room\", \"#general\")\n self.icon_emoji = config.get(\"icon-emoji\", \":robot_face:\")\n self.token = config[\"token\"]\n self.timeout = config.get(\"connect-timeout\", 10)\n self.chat_as_user = config.get(\"chat-as-user\", False)\n self.ssl_context = ssl.create_default_context(cafile=certifi.where())\n self.slack = slack.WebClient(\n token=self.token,\n run_async=True,\n ssl=self.ssl_context,\n proxy=os.environ.get(\"HTTPS_PROXY\"),\n )\n self.slack_rtm = slack.RTMClient(\n token=self.token,\n run_async=True,\n ssl=self.ssl_context,\n proxy=os.environ.get(\"HTTPS_PROXY\"),\n )\n self.websocket = None\n self.bot_name = config.get(\"bot-name\", \"opsdroid\")\n self.auth_info = None\n self.user_info = None\n self.bot_id = None\n self.known_users = {}\n self.keepalive = None\n self.reconnecting = False\n self.listening = True\n self._message_id = 0\n\n # Register callbacks\n slack.RTMClient.on(event=\"message\", callback=self.process_message)\n\n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n _LOGGER.info(_(\"Connecting to Slack.\"))\n\n try:\n # The slack library recommends you call `self.slack_rtm.start()`` here but it\n # seems to mess with the event loop's signal handlers which breaks opsdroid.\n # Therefore we need to directly call the private `_connect_and_read` method\n # instead. 
This method also blocks so we need to dispatch it to the loop as a task.\n self.opsdroid.eventloop.create_task(self.slack_rtm._connect_and_read())\n\n self.auth_info = (await self.slack.api_call(\"auth.test\")).data\n self.user_info = (\n await self.slack.api_call(\n \"users.info\",\n http_verb=\"GET\",\n params={\"user\": self.auth_info[\"user_id\"]},\n )\n ).data\n self.bot_id = self.user_info[\"user\"][\"profile\"][\"bot_id\"]\n\n self.opsdroid.web_server.web_app.router.add_post(\n \"/connector/{}/interactions\".format(self.name),\n self.slack_interactions_handler,\n )\n\n _LOGGER.debug(_(\"Connected as %s.\"), self.bot_name)\n _LOGGER.debug(_(\"Using icon %s.\"), self.icon_emoji)\n _LOGGER.debug(_(\"Default room is %s.\"), self.default_target)\n _LOGGER.info(_(\"Connected successfully.\"))\n except slack.errors.SlackApiError as error:\n _LOGGER.error(\n _(\n \"Unable to connect to Slack due to %s.\"\n \"The Slack Connector will not be available.\"\n ),\n error,\n )\n except Exception:\n await self.disconnect()\n raise\n\n async def disconnect(self):\n \"\"\"Disconnect from Slack.\"\"\"\n self.slack_rtm.stop()\n self.listening = False\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\"\"\"\n\n async def process_message(self, **payload):\n \"\"\"Process a raw message and pass it to the parser.\"\"\"\n message = payload[\"data\"]\n\n # Ignore message edits\n if \"subtype\" in message and message[\"subtype\"] == \"message_changed\":\n return\n\n # Ignore own messages\n if (\n \"subtype\" in message\n and message[\"subtype\"] == \"bot_message\"\n and message[\"bot_id\"] == self.bot_id\n ):\n return\n\n # Lookup username\n _LOGGER.debug(_(\"Looking up sender username.\"))\n try:\n user_info = await self.lookup_username(message[\"user\"])\n except ValueError:\n return\n\n # Replace usernames in the message\n _LOGGER.debug(_(\"Replacing userids in message with usernames.\"))\n message[\"text\"] = await self.replace_usernames(message[\"text\"])\n\n await self.opsdroid.parse(\n Message(\n text=message[\"text\"],\n user=user_info[\"name\"],\n target=message[\"channel\"],\n connector=self,\n raw_event=message,\n )\n )\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\"\"\"\n _LOGGER.debug(\n _(\"Responding with: '%s' in room %s.\"), message.text, message.target\n )\n await self.slack.api_call(\n \"chat.postMessage\",\n data={\n \"channel\": message.target,\n \"text\": message.text,\n \"as_user\": self.chat_as_user,\n \"username\": self.bot_name,\n \"icon_emoji\": self.icon_emoji,\n },\n )\n\n @register_event(Blocks)\n async def send_blocks(self, blocks):\n \"\"\"Respond with structured blocks.\"\"\"\n _LOGGER.debug(\n _(\"Responding with interactive blocks in room %s.\"), blocks.target\n )\n await self.slack.api_call(\n \"chat.postMessage\",\n data={\n \"channel\": blocks.target,\n \"as_user\": self.chat_as_user,\n \"username\": self.bot_name,\n \"blocks\": blocks.blocks,\n \"icon_emoji\": self.icon_emoji,\n },\n )\n\n @register_event(Reaction)\n async def send_reaction(self, reaction):\n \"\"\"React to a message.\"\"\"\n emoji = demojize(reaction.emoji).replace(\":\", \"\")\n _LOGGER.debug(_(\"Reacting with: %s.\"), emoji)\n try:\n await self.slack.api_call(\n \"reactions.add\",\n data={\n \"name\": emoji,\n \"channel\": reaction.target,\n \"timestamp\": reaction.linked_event.event_id,\n },\n )\n except slack.errors.SlackApiError as error:\n if \"invalid_name\" in str(error):\n _LOGGER.warning(_(\"Slack does not support the 
emoji %s.\"), emoji)\n else:\n raise\n\n async def lookup_username(self, userid):\n \"\"\"Lookup a username and cache it.\"\"\"\n if userid in self.known_users:\n user_info = self.known_users[userid]\n else:\n response = await self.slack.users_info(user=userid)\n user_info = response.data[\"user\"]\n if isinstance(user_info, dict):\n self.known_users[userid] = user_info\n else:\n raise ValueError(\"Returned user is not a dict.\")\n return user_info\n\n async def replace_usernames(self, message):\n \"\"\"Replace User ID with username in message text.\"\"\"\n userids = re.findall(r\"\\<\\@([A-Z0-9]+)(?:\\|.+)?\\>\", message)\n for userid in userids:\n user_info = await self.lookup_username(userid)\n message = message.replace(\n \"<@{userid}>\".format(userid=userid), user_info[\"name\"]\n )\n return message\n\n async def slack_interactions_handler(self, request):\n \"\"\"Handle interactive events in Slack.\n\n For each entry in request, it will check if the entry is one of the four main\n interaction types in slack: block_actions, message_actions, view_submissions\n and view_closed. Then it will process all the incoming messages.\n\n Return:\n A 200 OK response. The Messenger Platform will resend the webhook\n event every 20 seconds, until a 200 OK response is received.\n Failing to return a 200 OK may cause your webhook to be\n unsubscribed by the Messenger Platform.\n\n \"\"\"\n\n req_data = await request.post()\n payload = json.loads(req_data[\"payload\"])\n\n if \"type\" in payload:\n if payload[\"type\"] == \"block_actions\":\n for action in payload[\"actions\"]:\n block_action = BlockActions(\n payload,\n user=payload[\"user\"][\"id\"],\n target=payload[\"channel\"][\"id\"],\n connector=self,\n )\n\n action_value = None\n if action[\"type\"] == \"button\":\n action_value = action[\"value\"]\n elif action[\"type\"] in [\"overflow\", \"static_select\"]:\n action_value = action[\"selected_option\"][\"value\"]\n elif action[\"type\"] == \"datepicker\":\n action_value = action[\"selected_date\"]\n elif action[\"type\"] == \"multi_static_select\":\n action_value = [v[\"value\"] for v in action[\"selected_options\"]]\n\n if action_value:\n block_action.update_entity(\"value\", action_value)\n await self.opsdroid.parse(block_action)\n elif payload[\"type\"] == \"message_action\":\n await self.opsdroid.parse(\n MessageAction(\n payload,\n user=payload[\"user\"][\"id\"],\n target=payload[\"channel\"][\"id\"],\n connector=self,\n )\n )\n elif payload[\"type\"] == \"view_submission\":\n await self.opsdroid.parse(\n ViewSubmission(\n payload,\n user=payload[\"user\"][\"id\"],\n target=payload[\"user\"][\"id\"],\n connector=self,\n )\n )\n elif payload[\"type\"] == \"view_closed\":\n await self.opsdroid.parse(\n ViewClosed(\n payload,\n user=payload[\"user\"][\"id\"],\n target=payload[\"user\"][\"id\"],\n connector=self,\n )\n )\n\n return aiohttp.web.Response(text=json.dumps(\"Received\"), status=200)\n",
"path": "opsdroid/connector/slack/__init__.py"
}
] | [
{
"content": "\"\"\"A connector for Slack.\"\"\"\nimport logging\nimport re\nimport os\nimport ssl\nimport certifi\nimport json\n\nimport aiohttp\n\nimport slack\nfrom emoji import demojize\nfrom voluptuous import Required\n\nfrom opsdroid.connector import Connector, register_event\nfrom opsdroid.events import Message, Reaction\nfrom opsdroid.connector.slack.events import (\n Blocks,\n BlockActions,\n MessageAction,\n ViewSubmission,\n ViewClosed,\n)\n\n\n_LOGGER = logging.getLogger(__name__)\nCONFIG_SCHEMA = {\n Required(\"token\"): str,\n \"bot-name\": str,\n \"default-room\": str,\n \"icon-emoji\": str,\n \"connect-timeout\": int,\n \"chat-as-user\": bool,\n}\n\n\nclass ConnectorSlack(Connector):\n \"\"\"A connector for Slack.\"\"\"\n\n def __init__(self, config, opsdroid=None):\n \"\"\"Create the connector.\"\"\"\n super().__init__(config, opsdroid=opsdroid)\n _LOGGER.debug(_(\"Starting Slack connector.\"))\n self.name = \"slack\"\n self.default_target = config.get(\"default-room\", \"#general\")\n self.icon_emoji = config.get(\"icon-emoji\", \":robot_face:\")\n self.token = config[\"token\"]\n self.timeout = config.get(\"connect-timeout\", 10)\n self.chat_as_user = config.get(\"chat-as-user\", False)\n self.ssl_context = ssl.create_default_context(cafile=certifi.where())\n self.slack = slack.WebClient(\n token=self.token,\n run_async=True,\n ssl=self.ssl_context,\n proxy=os.environ.get(\"HTTPS_PROXY\"),\n )\n self.slack_rtm = slack.RTMClient(\n token=self.token,\n run_async=True,\n ssl=self.ssl_context,\n proxy=os.environ.get(\"HTTPS_PROXY\"),\n )\n self.websocket = None\n self.bot_name = config.get(\"bot-name\", \"opsdroid\")\n self.auth_info = None\n self.user_info = None\n self.bot_id = None\n self.known_users = {}\n self.keepalive = None\n self.reconnecting = False\n self.listening = True\n self._message_id = 0\n\n # Register callbacks\n slack.RTMClient.on(event=\"message\", callback=self.process_message)\n\n async def connect(self):\n \"\"\"Connect to the chat service.\"\"\"\n _LOGGER.info(_(\"Connecting to Slack.\"))\n\n try:\n # The slack library recommends you call `self.slack_rtm.start()`` here but it\n # seems to mess with the event loop's signal handlers which breaks opsdroid.\n # Therefore we need to directly call the private `_connect_and_read` method\n # instead. 
This method also blocks so we need to dispatch it to the loop as a task.\n self.opsdroid.eventloop.create_task(self.slack_rtm._connect_and_read())\n\n self.auth_info = (await self.slack.api_call(\"auth.test\")).data\n self.user_info = (\n await self.slack.api_call(\n \"users.info\",\n http_verb=\"GET\",\n params={\"user\": self.auth_info[\"user_id\"]},\n )\n ).data\n self.bot_id = self.user_info[\"user\"][\"profile\"][\"bot_id\"]\n\n self.opsdroid.web_server.web_app.router.add_post(\n \"/connector/{}/interactions\".format(self.name),\n self.slack_interactions_handler,\n )\n\n _LOGGER.debug(_(\"Connected as %s.\"), self.bot_name)\n _LOGGER.debug(_(\"Using icon %s.\"), self.icon_emoji)\n _LOGGER.debug(_(\"Default room is %s.\"), self.default_target)\n _LOGGER.info(_(\"Connected successfully.\"))\n except slack.errors.SlackApiError as error:\n _LOGGER.error(\n _(\n \"Unable to connect to Slack due to %s.\"\n \"The Slack Connector will not be available.\"\n ),\n error,\n )\n except Exception:\n await self.disconnect()\n raise\n\n async def disconnect(self):\n \"\"\"Disconnect from Slack.\"\"\"\n self.slack_rtm.stop()\n self.listening = False\n\n async def listen(self):\n \"\"\"Listen for and parse new messages.\"\"\"\n\n async def process_message(self, **payload):\n \"\"\"Process a raw message and pass it to the parser.\"\"\"\n message = payload[\"data\"]\n\n # Ignore message edits\n if \"subtype\" in message and message[\"subtype\"] == \"message_changed\":\n return\n\n # Ignore own messages\n if (\n \"subtype\" in message\n and message[\"subtype\"] == \"bot_message\"\n and message[\"bot_id\"] == self.bot_id\n ):\n return\n\n # Lookup username\n _LOGGER.debug(_(\"Looking up sender username.\"))\n try:\n user_info = await self.lookup_username(message[\"user\"])\n except (ValueError, KeyError) as error:\n _LOGGER.error(_(\"Username lookup failed for %s.\"), error)\n return\n\n # Replace usernames in the message\n _LOGGER.debug(_(\"Replacing userids in message with usernames.\"))\n message[\"text\"] = await self.replace_usernames(message[\"text\"])\n\n await self.opsdroid.parse(\n Message(\n text=message[\"text\"],\n user=user_info[\"name\"],\n target=message[\"channel\"],\n connector=self,\n raw_event=message,\n )\n )\n\n @register_event(Message)\n async def send_message(self, message):\n \"\"\"Respond with a message.\"\"\"\n _LOGGER.debug(\n _(\"Responding with: '%s' in room %s.\"), message.text, message.target\n )\n await self.slack.api_call(\n \"chat.postMessage\",\n data={\n \"channel\": message.target,\n \"text\": message.text,\n \"as_user\": self.chat_as_user,\n \"username\": self.bot_name,\n \"icon_emoji\": self.icon_emoji,\n },\n )\n\n @register_event(Blocks)\n async def send_blocks(self, blocks):\n \"\"\"Respond with structured blocks.\"\"\"\n _LOGGER.debug(\n _(\"Responding with interactive blocks in room %s.\"), blocks.target\n )\n await self.slack.api_call(\n \"chat.postMessage\",\n data={\n \"channel\": blocks.target,\n \"as_user\": self.chat_as_user,\n \"username\": self.bot_name,\n \"blocks\": blocks.blocks,\n \"icon_emoji\": self.icon_emoji,\n },\n )\n\n @register_event(Reaction)\n async def send_reaction(self, reaction):\n \"\"\"React to a message.\"\"\"\n emoji = demojize(reaction.emoji).replace(\":\", \"\")\n _LOGGER.debug(_(\"Reacting with: %s.\"), emoji)\n try:\n await self.slack.api_call(\n \"reactions.add\",\n data={\n \"name\": emoji,\n \"channel\": reaction.target,\n \"timestamp\": reaction.linked_event.event_id,\n },\n )\n except slack.errors.SlackApiError as error:\n 
if \"invalid_name\" in str(error):\n _LOGGER.warning(_(\"Slack does not support the emoji %s.\"), emoji)\n else:\n raise\n\n async def lookup_username(self, userid):\n \"\"\"Lookup a username and cache it.\"\"\"\n if userid in self.known_users:\n user_info = self.known_users[userid]\n else:\n response = await self.slack.users_info(user=userid)\n user_info = response.data[\"user\"]\n if isinstance(user_info, dict):\n self.known_users[userid] = user_info\n else:\n raise ValueError(\"Returned user is not a dict.\")\n return user_info\n\n async def replace_usernames(self, message):\n \"\"\"Replace User ID with username in message text.\"\"\"\n userids = re.findall(r\"\\<\\@([A-Z0-9]+)(?:\\|.+)?\\>\", message)\n for userid in userids:\n user_info = await self.lookup_username(userid)\n message = message.replace(\n \"<@{userid}>\".format(userid=userid), user_info[\"name\"]\n )\n return message\n\n async def slack_interactions_handler(self, request):\n \"\"\"Handle interactive events in Slack.\n\n For each entry in request, it will check if the entry is one of the four main\n interaction types in slack: block_actions, message_actions, view_submissions\n and view_closed. Then it will process all the incoming messages.\n\n Return:\n A 200 OK response. The Messenger Platform will resend the webhook\n event every 20 seconds, until a 200 OK response is received.\n Failing to return a 200 OK may cause your webhook to be\n unsubscribed by the Messenger Platform.\n\n \"\"\"\n\n req_data = await request.post()\n payload = json.loads(req_data[\"payload\"])\n\n if \"type\" in payload:\n if payload[\"type\"] == \"block_actions\":\n for action in payload[\"actions\"]:\n block_action = BlockActions(\n payload,\n user=payload[\"user\"][\"id\"],\n target=payload[\"channel\"][\"id\"],\n connector=self,\n )\n\n action_value = None\n if action[\"type\"] == \"button\":\n action_value = action[\"value\"]\n elif action[\"type\"] in [\"overflow\", \"static_select\"]:\n action_value = action[\"selected_option\"][\"value\"]\n elif action[\"type\"] == \"datepicker\":\n action_value = action[\"selected_date\"]\n elif action[\"type\"] == \"multi_static_select\":\n action_value = [v[\"value\"] for v in action[\"selected_options\"]]\n\n if action_value:\n block_action.update_entity(\"value\", action_value)\n await self.opsdroid.parse(block_action)\n elif payload[\"type\"] == \"message_action\":\n await self.opsdroid.parse(\n MessageAction(\n payload,\n user=payload[\"user\"][\"id\"],\n target=payload[\"channel\"][\"id\"],\n connector=self,\n )\n )\n elif payload[\"type\"] == \"view_submission\":\n await self.opsdroid.parse(\n ViewSubmission(\n payload,\n user=payload[\"user\"][\"id\"],\n target=payload[\"user\"][\"id\"],\n connector=self,\n )\n )\n elif payload[\"type\"] == \"view_closed\":\n await self.opsdroid.parse(\n ViewClosed(\n payload,\n user=payload[\"user\"][\"id\"],\n target=payload[\"user\"][\"id\"],\n connector=self,\n )\n )\n\n return aiohttp.web.Response(text=json.dumps(\"Received\"), status=200)\n",
"path": "opsdroid/connector/slack/__init__.py"
}
] | diff --git a/opsdroid/connector/slack/__init__.py b/opsdroid/connector/slack/__init__.py
index ced412be7..b0de8108f 100644
--- a/opsdroid/connector/slack/__init__.py
+++ b/opsdroid/connector/slack/__init__.py
@@ -144,7 +144,8 @@ async def process_message(self, **payload):
_LOGGER.debug(_("Looking up sender username."))
try:
user_info = await self.lookup_username(message["user"])
- except ValueError:
+ except (ValueError, KeyError) as error:
+ _LOGGER.error(_("Username lookup failed for %s."), error)
return
# Replace usernames in the message
diff --git a/tests/test_connector_slack.py b/tests/test_connector_slack.py
index d4f12641d..c273999e5 100644
--- a/tests/test_connector_slack.py
+++ b/tests/test_connector_slack.py
@@ -148,6 +148,11 @@ async def test_process_message(self):
await connector.process_message(data=message)
self.assertFalse(connector.opsdroid.parse.called)
+ connector.opsdroid.parse.reset_mock()
+ connector.lookup_username.side_effect = KeyError
+ await connector.process_message(data=message)
+ self.assertFalse(connector.opsdroid.parse.called)
+
async def test_lookup_username(self):
"""Test that looking up a username works and that it caches."""
connector = ConnectorSlack({"token": "abc123"}, opsdroid=OpsDroid())
| opsdroid slack connector intermittently ends up in an exception
<!-- Before you post an issue or if you are unsure about something join our matrix channel https://riot.im/app/#/room/#opsdroid-general:matrix.org and ask away! We are more than happy to help you. -->
# Description - opsdroid slack connector intermittently ends up in an exception
This doesn't happen for all users, but I see that line 146 in File "/usr/local/lib/python3.7/site-packages/opsdroid/connector/slack/__init__.py" is the culprit.
```
INFO opsdroid.connector.slack: Connected successfully.
INFO opsdroid.web: Started web server on http://0.0.0.0:8080
INFO opsdroid.core: Opsdroid is now running, press ctrl+c to exit.
DEBUG slack.rtm.client: The Websocket connection has been opened.
DEBUG opsdroid.parsers.crontab: Running crontab skills at Mon Feb 10 10:21:00 2020.
DEBUG slack.rtm.client: Running 1 callbacks for event: 'message'
DEBUG opsdroid.connector.slack: Looking up sender username.
ERROR slack.rtm.client: When calling '#process_message()' in the 'opsdroid.connector.slack' module the following error was raised: 'user'
DEBUG asyncio: Using selector: EpollSelector
Traceback (most recent call last):
File "/usr/local/bin/opsdroid", line 8, in <module>
sys.exit(cli())
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/opsdroid/cli/start.py", line 42, in start
opsdroid.run()
File "/usr/local/lib/python3.7/site-packages/opsdroid/core.py", line 165, in run
self.eventloop.run_until_complete(asyncio.gather(*pending))
File "/usr/local/lib/python3.7/asyncio/base_events.py", line 583, in run_until_complete
return future.result()
File "/usr/local/lib/python3.7/site-packages/slack/rtm/client.py", line 339, in _connect_and_read
await self._read_messages()
File "/usr/local/lib/python3.7/site-packages/slack/rtm/client.py", line 390, in _read_messages
await self._dispatch_event(event, data=payload)
File "/usr/local/lib/python3.7/site-packages/slack/rtm/client.py", line 437, in _dispatch_event
rtm_client=self, web_client=self._web_client, data=data
File "/usr/local/lib/python3.7/site-packages/opsdroid/connector/slack/__init__.py", line 146, in process_message
user_info = await self.lookup_username(message["user"])
KeyError: 'user'
ERROR: Unhandled exception in opsdroid, exiting...
```
## Steps to Reproduce
I am not sure whether this can be reproduced elsewhere; otherwise it would probably have been reported by other users already.
The Slack channel has about 82 users.
The bot is a member of 2 channels.
Users also interact with the bot directly.
## Expected Functionality
No exception; looking up the sender username should succeed.
## Experienced Functionality
The debug log shows what happened instead:
```
INFO opsdroid.connector.slack: Connected successfully.
INFO opsdroid.web: Started web server on http://0.0.0.0:8080
INFO opsdroid.core: Opsdroid is now running, press ctrl+c to exit.
DEBUG slack.rtm.client: The Websocket connection has been opened.
DEBUG opsdroid.parsers.crontab: Running crontab skills at Mon Feb 10 10:21:00 2020.
DEBUG slack.rtm.client: Running 1 callbacks for event: 'message'
DEBUG opsdroid.connector.slack: Looking up sender username.
ERROR slack.rtm.client: When calling '#process_message()' in the 'opsdroid.connector.slack' module the following error was raised: 'user'
DEBUG asyncio: Using selector: EpollSelector
Traceback (most recent call last):
File "/usr/local/bin/opsdroid", line 8, in <module>
sys.exit(cli())
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 764, in __call__
return self.main(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 717, in main
rv = self.invoke(ctx)
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 1137, in invoke
return _process_result(sub_ctx.command.invoke(sub_ctx))
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 956, in invoke
return ctx.invoke(self.callback, **ctx.params)
File "/usr/local/lib/python3.7/site-packages/click/core.py", line 555, in invoke
return callback(*args, **kwargs)
File "/usr/local/lib/python3.7/site-packages/opsdroid/cli/start.py", line 42, in start
opsdroid.run()
File "/usr/local/lib/python3.7/site-packages/opsdroid/core.py", line 165, in run
self.eventloop.run_until_complete(asyncio.gather(*pending))
File "/usr/local/lib/python3.7/asyncio/base_events.py", line 583, in run_until_complete
return future.result()
File "/usr/local/lib/python3.7/site-packages/slack/rtm/client.py", line 339, in _connect_and_read
await self._read_messages()
File "/usr/local/lib/python3.7/site-packages/slack/rtm/client.py", line 390, in _read_messages
await self._dispatch_event(event, data=payload)
File "/usr/local/lib/python3.7/site-packages/slack/rtm/client.py", line 437, in _dispatch_event
rtm_client=self, web_client=self._web_client, data=data
File "/usr/local/lib/python3.7/site-packages/opsdroid/connector/slack/__init__.py", line 146, in process_message
user_info = await self.lookup_username(message["user"])
KeyError: 'user'
ERROR: Unhandled exception in opsdroid, exiting...
```
## Versions
- **Opsdroid version:** latest master code.
- **Python version:** python3.7
- **OS/Docker version:** 18.06.3-ce
## Configuration File
The configuration file is passed in values.yaml of the Helm chart:
```yaml
configuration: |
welcome-message: true
connectors:
slack:
token: "xxx"
bot-name: "xxx" # default "opsdroid"
default-room: "#xxx" # default "#general"
#icon-emoji: ":smile:" # default ":robot_face:"
connect-timeout: 10 # default 10 seconds
chat-as-user: false # default false
skills:
- name: skill-yyy-statistics
path: /home/skill/skill-yyy-statistics
db_server: "1.1.1.1"
db_name: "xx"
user: "xxx"
password: "xxx"
- name: skill-yyy-help
path: /home/skill/skill-yyy-help
- name: skill-yyy-cache
path: /home/skill/skill-yyy-cache
db_server: "1.1.1.1"
db_name: "zz"
user: "xxx"
password: "xxxx"
- name: skill-yyy-eee
path: /home/skill/skill-yyy-eee
- name: skill-yyy-ttt
path: /home/skill/skill-yyy-ttt
```
## Additional Details
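The likely trigger is a Slack event that arrives without a top-level `user` field, for example a bot message or an edited-message event. Below is a minimal, self-contained sketch of the pattern the fix above applies; the helper names are stand-ins for illustration, not the real connector API.

```python
# Treat a missing "user" key like a failed lookup: log it and skip the event
# instead of letting KeyError propagate out of the RTM callback.
import asyncio
import logging

_LOGGER = logging.getLogger(__name__)


async def lookup_username(userid):
    # Stand-in for ConnectorSlack.lookup_username; the real one may raise ValueError.
    return {"name": "user-{}".format(userid)}


async def process_message(message):
    try:
        user_info = await lookup_username(message["user"])
    except (ValueError, KeyError) as error:
        _LOGGER.error("Username lookup failed for %s.", error)
        return None
    return user_info["name"]


# An event without a top-level "user" key is now skipped rather than raising
# KeyError: 'user' and taking the whole connector down.
print(asyncio.run(process_message({"subtype": "bot_message", "text": "hi"})))
```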
|
pyca__cryptography-7644 | [
{
"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\nimport os\nimport typing\n\nfrom cryptography import exceptions, utils\nfrom cryptography.hazmat.backends.openssl import aead\nfrom cryptography.hazmat.backends.openssl.backend import backend\nfrom cryptography.hazmat.bindings._rust import FixedPool\n\n\nclass ChaCha20Poly1305:\n _MAX_SIZE = 2**31 - 1\n\n def __init__(self, key: bytes):\n if not backend.aead_cipher_supported(self):\n raise exceptions.UnsupportedAlgorithm(\n \"ChaCha20Poly1305 is not supported by this version of OpenSSL\",\n exceptions._Reasons.UNSUPPORTED_CIPHER,\n )\n utils._check_byteslike(\"key\", key)\n\n if len(key) != 32:\n raise ValueError(\"ChaCha20Poly1305 key must be 32 bytes.\")\n\n self._key = key\n self._pool = FixedPool(self._create_fn)\n\n @classmethod\n def generate_key(cls) -> bytes:\n return os.urandom(32)\n\n def _create_fn(self):\n return aead._aead_create_ctx(backend, self, self._key)\n\n def encrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:\n # This is OverflowError to match what cffi would raise\n raise OverflowError(\n \"Data or associated data too long. Max 2**31 - 1 bytes\"\n )\n\n self._check_params(nonce, data, associated_data)\n with self._pool.acquire() as ctx:\n return aead._encrypt(\n backend, self, nonce, data, [associated_data], 16, ctx\n )\n\n def decrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n self._check_params(nonce, data, associated_data)\n with self._pool.acquire() as ctx:\n return aead._decrypt(\n backend, self, nonce, data, [associated_data], 16, ctx\n )\n\n def _check_params(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: bytes,\n ) -> None:\n utils._check_byteslike(\"nonce\", nonce)\n utils._check_bytes(\"data\", data)\n utils._check_bytes(\"associated_data\", associated_data)\n if len(nonce) != 12:\n raise ValueError(\"Nonce must be 12 bytes\")\n\n\nclass AESCCM:\n _MAX_SIZE = 2**31 - 1\n\n def __init__(self, key: bytes, tag_length: int = 16):\n utils._check_byteslike(\"key\", key)\n if len(key) not in (16, 24, 32):\n raise ValueError(\"AESCCM key must be 128, 192, or 256 bits.\")\n\n self._key = key\n if not isinstance(tag_length, int):\n raise TypeError(\"tag_length must be an integer\")\n\n if tag_length not in (4, 6, 8, 10, 12, 14, 16):\n raise ValueError(\"Invalid tag_length\")\n\n self._tag_length = tag_length\n\n if not backend.aead_cipher_supported(self):\n raise exceptions.UnsupportedAlgorithm(\n \"AESCCM is not supported by this version of OpenSSL\",\n exceptions._Reasons.UNSUPPORTED_CIPHER,\n )\n\n @classmethod\n def generate_key(cls, bit_length: int) -> bytes:\n if not isinstance(bit_length, int):\n raise TypeError(\"bit_length must be an integer\")\n\n if bit_length not in (128, 192, 256):\n raise ValueError(\"bit_length must be 128, 192, or 256\")\n\n return os.urandom(bit_length // 8)\n\n def encrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:\n # This is OverflowError to match 
what cffi would raise\n raise OverflowError(\n \"Data or associated data too long. Max 2**31 - 1 bytes\"\n )\n\n self._check_params(nonce, data, associated_data)\n self._validate_lengths(nonce, len(data))\n return aead._encrypt(\n backend, self, nonce, data, [associated_data], self._tag_length\n )\n\n def decrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n self._check_params(nonce, data, associated_data)\n return aead._decrypt(\n backend, self, nonce, data, [associated_data], self._tag_length\n )\n\n def _validate_lengths(self, nonce: bytes, data_len: int) -> None:\n # For information about computing this, see\n # https://tools.ietf.org/html/rfc3610#section-2.1\n l_val = 15 - len(nonce)\n if 2 ** (8 * l_val) < data_len:\n raise ValueError(\"Data too long for nonce\")\n\n def _check_params(\n self, nonce: bytes, data: bytes, associated_data: bytes\n ) -> None:\n utils._check_byteslike(\"nonce\", nonce)\n utils._check_bytes(\"data\", data)\n utils._check_bytes(\"associated_data\", associated_data)\n if not 7 <= len(nonce) <= 13:\n raise ValueError(\"Nonce must be between 7 and 13 bytes\")\n\n\nclass AESGCM:\n _MAX_SIZE = 2**31 - 1\n\n def __init__(self, key: bytes):\n utils._check_byteslike(\"key\", key)\n if len(key) not in (16, 24, 32):\n raise ValueError(\"AESGCM key must be 128, 192, or 256 bits.\")\n\n self._key = key\n\n @classmethod\n def generate_key(cls, bit_length: int) -> bytes:\n if not isinstance(bit_length, int):\n raise TypeError(\"bit_length must be an integer\")\n\n if bit_length not in (128, 192, 256):\n raise ValueError(\"bit_length must be 128, 192, or 256\")\n\n return os.urandom(bit_length // 8)\n\n def encrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:\n # This is OverflowError to match what cffi would raise\n raise OverflowError(\n \"Data or associated data too long. 
Max 2**31 - 1 bytes\"\n )\n\n self._check_params(nonce, data, associated_data)\n return aead._encrypt(backend, self, nonce, data, [associated_data], 16)\n\n def decrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n self._check_params(nonce, data, associated_data)\n return aead._decrypt(backend, self, nonce, data, [associated_data], 16)\n\n def _check_params(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: bytes,\n ) -> None:\n utils._check_byteslike(\"nonce\", nonce)\n utils._check_bytes(\"data\", data)\n utils._check_bytes(\"associated_data\", associated_data)\n if len(nonce) < 8 or len(nonce) > 128:\n raise ValueError(\"Nonce must be between 8 and 128 bytes\")\n\n\nclass AESOCB3:\n _MAX_SIZE = 2**31 - 1\n\n def __init__(self, key: bytes):\n utils._check_byteslike(\"key\", key)\n if len(key) not in (16, 24, 32):\n raise ValueError(\"AESOCB3 key must be 128, 192, or 256 bits.\")\n\n self._key = key\n\n if not backend.aead_cipher_supported(self):\n raise exceptions.UnsupportedAlgorithm(\n \"OCB3 is not supported by this version of OpenSSL\",\n exceptions._Reasons.UNSUPPORTED_CIPHER,\n )\n\n @classmethod\n def generate_key(cls, bit_length: int) -> bytes:\n if not isinstance(bit_length, int):\n raise TypeError(\"bit_length must be an integer\")\n\n if bit_length not in (128, 192, 256):\n raise ValueError(\"bit_length must be 128, 192, or 256\")\n\n return os.urandom(bit_length // 8)\n\n def encrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:\n # This is OverflowError to match what cffi would raise\n raise OverflowError(\n \"Data or associated data too long. 
Max 2**31 - 1 bytes\"\n )\n\n self._check_params(nonce, data, associated_data)\n return aead._encrypt(backend, self, nonce, data, [associated_data], 16)\n\n def decrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n self._check_params(nonce, data, associated_data)\n return aead._decrypt(backend, self, nonce, data, [associated_data], 16)\n\n def _check_params(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: bytes,\n ) -> None:\n utils._check_byteslike(\"nonce\", nonce)\n utils._check_bytes(\"data\", data)\n utils._check_bytes(\"associated_data\", associated_data)\n if len(nonce) < 12 or len(nonce) > 15:\n raise ValueError(\"Nonce must be between 12 and 15 bytes\")\n\n\nclass AESSIV(object):\n _MAX_SIZE = 2**31 - 1\n\n def __init__(self, key: bytes):\n utils._check_byteslike(\"key\", key)\n if len(key) not in (32, 48, 64):\n raise ValueError(\"AESSIV key must be 256, 384, or 512 bits.\")\n\n self._key = key\n\n if not backend.aead_cipher_supported(self):\n raise exceptions.UnsupportedAlgorithm(\n \"AES-SIV is not supported by this version of OpenSSL\",\n exceptions._Reasons.UNSUPPORTED_CIPHER,\n )\n\n @classmethod\n def generate_key(cls, bit_length: int) -> bytes:\n if not isinstance(bit_length, int):\n raise TypeError(\"bit_length must be an integer\")\n\n if bit_length not in (256, 384, 512):\n raise ValueError(\"bit_length must be 256, 384, or 512\")\n\n return os.urandom(bit_length // 8)\n\n def encrypt(\n self,\n data: bytes,\n associated_data: typing.Optional[typing.List[bytes]],\n ) -> bytes:\n if associated_data is None:\n associated_data = []\n\n self._check_params(data, associated_data)\n\n if len(data) > self._MAX_SIZE or any(\n len(ad) > self._MAX_SIZE for ad in associated_data\n ):\n # This is OverflowError to match what cffi would raise\n raise OverflowError(\n \"Data or associated data too long. Max 2**31 - 1 bytes\"\n )\n\n return aead._encrypt(backend, self, b\"\", data, associated_data, 16)\n\n def decrypt(\n self,\n data: bytes,\n associated_data: typing.Optional[typing.List[bytes]],\n ) -> bytes:\n if associated_data is None:\n associated_data = []\n\n self._check_params(data, associated_data)\n\n return aead._decrypt(backend, self, b\"\", data, associated_data, 16)\n\n def _check_params(\n self,\n data: bytes,\n associated_data: typing.List,\n ) -> None:\n utils._check_bytes(\"data\", data)\n if not isinstance(associated_data, list) or not all(\n isinstance(x, bytes) for x in associated_data\n ):\n raise TypeError(\"associated_data must be a list of bytes or None\")\n",
"path": "src/cryptography/hazmat/primitives/ciphers/aead.py"
}
] | [
{
"content": "# This file is dual licensed under the terms of the Apache License, Version\n# 2.0, and the BSD License. See the LICENSE file in the root of this repository\n# for complete details.\n\n\nimport os\nimport typing\n\nfrom cryptography import exceptions, utils\nfrom cryptography.hazmat.backends.openssl import aead\nfrom cryptography.hazmat.backends.openssl.backend import backend\nfrom cryptography.hazmat.bindings._rust import FixedPool\n\n\nclass ChaCha20Poly1305:\n _MAX_SIZE = 2**31 - 1\n\n def __init__(self, key: bytes):\n if not backend.aead_cipher_supported(self):\n raise exceptions.UnsupportedAlgorithm(\n \"ChaCha20Poly1305 is not supported by this version of OpenSSL\",\n exceptions._Reasons.UNSUPPORTED_CIPHER,\n )\n utils._check_byteslike(\"key\", key)\n\n if len(key) != 32:\n raise ValueError(\"ChaCha20Poly1305 key must be 32 bytes.\")\n\n self._key = key\n self._pool = FixedPool(self._create_fn)\n\n @classmethod\n def generate_key(cls) -> bytes:\n return os.urandom(32)\n\n def _create_fn(self):\n return aead._aead_create_ctx(backend, self, self._key)\n\n def encrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:\n # This is OverflowError to match what cffi would raise\n raise OverflowError(\n \"Data or associated data too long. Max 2**31 - 1 bytes\"\n )\n\n self._check_params(nonce, data, associated_data)\n with self._pool.acquire() as ctx:\n return aead._encrypt(\n backend, self, nonce, data, [associated_data], 16, ctx\n )\n\n def decrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n self._check_params(nonce, data, associated_data)\n with self._pool.acquire() as ctx:\n return aead._decrypt(\n backend, self, nonce, data, [associated_data], 16, ctx\n )\n\n def _check_params(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: bytes,\n ) -> None:\n utils._check_byteslike(\"nonce\", nonce)\n utils._check_bytes(\"data\", data)\n utils._check_bytes(\"associated_data\", associated_data)\n if len(nonce) != 12:\n raise ValueError(\"Nonce must be 12 bytes\")\n\n\nclass AESCCM:\n _MAX_SIZE = 2**31 - 1\n\n def __init__(self, key: bytes, tag_length: int = 16):\n utils._check_byteslike(\"key\", key)\n if len(key) not in (16, 24, 32):\n raise ValueError(\"AESCCM key must be 128, 192, or 256 bits.\")\n\n self._key = key\n if not isinstance(tag_length, int):\n raise TypeError(\"tag_length must be an integer\")\n\n if tag_length not in (4, 6, 8, 10, 12, 14, 16):\n raise ValueError(\"Invalid tag_length\")\n\n self._tag_length = tag_length\n\n if not backend.aead_cipher_supported(self):\n raise exceptions.UnsupportedAlgorithm(\n \"AESCCM is not supported by this version of OpenSSL\",\n exceptions._Reasons.UNSUPPORTED_CIPHER,\n )\n\n @classmethod\n def generate_key(cls, bit_length: int) -> bytes:\n if not isinstance(bit_length, int):\n raise TypeError(\"bit_length must be an integer\")\n\n if bit_length not in (128, 192, 256):\n raise ValueError(\"bit_length must be 128, 192, or 256\")\n\n return os.urandom(bit_length // 8)\n\n def encrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:\n # This is OverflowError to match 
what cffi would raise\n raise OverflowError(\n \"Data or associated data too long. Max 2**31 - 1 bytes\"\n )\n\n self._check_params(nonce, data, associated_data)\n self._validate_lengths(nonce, len(data))\n return aead._encrypt(\n backend, self, nonce, data, [associated_data], self._tag_length\n )\n\n def decrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n self._check_params(nonce, data, associated_data)\n return aead._decrypt(\n backend, self, nonce, data, [associated_data], self._tag_length\n )\n\n def _validate_lengths(self, nonce: bytes, data_len: int) -> None:\n # For information about computing this, see\n # https://tools.ietf.org/html/rfc3610#section-2.1\n l_val = 15 - len(nonce)\n if 2 ** (8 * l_val) < data_len:\n raise ValueError(\"Data too long for nonce\")\n\n def _check_params(\n self, nonce: bytes, data: bytes, associated_data: bytes\n ) -> None:\n utils._check_byteslike(\"nonce\", nonce)\n utils._check_bytes(\"data\", data)\n utils._check_bytes(\"associated_data\", associated_data)\n if not 7 <= len(nonce) <= 13:\n raise ValueError(\"Nonce must be between 7 and 13 bytes\")\n\n\nclass AESGCM:\n _MAX_SIZE = 2**31 - 1\n\n def __init__(self, key: bytes):\n utils._check_byteslike(\"key\", key)\n if len(key) not in (16, 24, 32):\n raise ValueError(\"AESGCM key must be 128, 192, or 256 bits.\")\n\n self._key = key\n\n @classmethod\n def generate_key(cls, bit_length: int) -> bytes:\n if not isinstance(bit_length, int):\n raise TypeError(\"bit_length must be an integer\")\n\n if bit_length not in (128, 192, 256):\n raise ValueError(\"bit_length must be 128, 192, or 256\")\n\n return os.urandom(bit_length // 8)\n\n def encrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:\n # This is OverflowError to match what cffi would raise\n raise OverflowError(\n \"Data or associated data too long. 
Max 2**31 - 1 bytes\"\n )\n\n self._check_params(nonce, data, associated_data)\n return aead._encrypt(backend, self, nonce, data, [associated_data], 16)\n\n def decrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n self._check_params(nonce, data, associated_data)\n return aead._decrypt(backend, self, nonce, data, [associated_data], 16)\n\n def _check_params(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: bytes,\n ) -> None:\n utils._check_byteslike(\"nonce\", nonce)\n utils._check_bytes(\"data\", data)\n utils._check_bytes(\"associated_data\", associated_data)\n if len(nonce) < 8 or len(nonce) > 128:\n raise ValueError(\"Nonce must be between 8 and 128 bytes\")\n\n\nclass AESOCB3:\n _MAX_SIZE = 2**31 - 1\n\n def __init__(self, key: bytes):\n utils._check_byteslike(\"key\", key)\n if len(key) not in (16, 24, 32):\n raise ValueError(\"AESOCB3 key must be 128, 192, or 256 bits.\")\n\n self._key = key\n\n if not backend.aead_cipher_supported(self):\n raise exceptions.UnsupportedAlgorithm(\n \"OCB3 is not supported by this version of OpenSSL\",\n exceptions._Reasons.UNSUPPORTED_CIPHER,\n )\n\n @classmethod\n def generate_key(cls, bit_length: int) -> bytes:\n if not isinstance(bit_length, int):\n raise TypeError(\"bit_length must be an integer\")\n\n if bit_length not in (128, 192, 256):\n raise ValueError(\"bit_length must be 128, 192, or 256\")\n\n return os.urandom(bit_length // 8)\n\n def encrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n if len(data) > self._MAX_SIZE or len(associated_data) > self._MAX_SIZE:\n # This is OverflowError to match what cffi would raise\n raise OverflowError(\n \"Data or associated data too long. 
Max 2**31 - 1 bytes\"\n )\n\n self._check_params(nonce, data, associated_data)\n return aead._encrypt(backend, self, nonce, data, [associated_data], 16)\n\n def decrypt(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: typing.Optional[bytes],\n ) -> bytes:\n if associated_data is None:\n associated_data = b\"\"\n\n self._check_params(nonce, data, associated_data)\n return aead._decrypt(backend, self, nonce, data, [associated_data], 16)\n\n def _check_params(\n self,\n nonce: bytes,\n data: bytes,\n associated_data: bytes,\n ) -> None:\n utils._check_byteslike(\"nonce\", nonce)\n utils._check_bytes(\"data\", data)\n utils._check_bytes(\"associated_data\", associated_data)\n if len(nonce) < 12 or len(nonce) > 15:\n raise ValueError(\"Nonce must be between 12 and 15 bytes\")\n\n\nclass AESSIV(object):\n _MAX_SIZE = 2**31 - 1\n\n def __init__(self, key: bytes):\n utils._check_byteslike(\"key\", key)\n if len(key) not in (32, 48, 64):\n raise ValueError(\"AESSIV key must be 256, 384, or 512 bits.\")\n\n self._key = key\n\n if not backend.aead_cipher_supported(self):\n raise exceptions.UnsupportedAlgorithm(\n \"AES-SIV is not supported by this version of OpenSSL\",\n exceptions._Reasons.UNSUPPORTED_CIPHER,\n )\n\n @classmethod\n def generate_key(cls, bit_length: int) -> bytes:\n if not isinstance(bit_length, int):\n raise TypeError(\"bit_length must be an integer\")\n\n if bit_length not in (256, 384, 512):\n raise ValueError(\"bit_length must be 256, 384, or 512\")\n\n return os.urandom(bit_length // 8)\n\n def encrypt(\n self,\n data: bytes,\n associated_data: typing.Optional[typing.List[bytes]],\n ) -> bytes:\n if associated_data is None:\n associated_data = []\n\n self._check_params(data, associated_data)\n\n if len(data) > self._MAX_SIZE or any(\n len(ad) > self._MAX_SIZE for ad in associated_data\n ):\n # This is OverflowError to match what cffi would raise\n raise OverflowError(\n \"Data or associated data too long. Max 2**31 - 1 bytes\"\n )\n\n return aead._encrypt(backend, self, b\"\", data, associated_data, 16)\n\n def decrypt(\n self,\n data: bytes,\n associated_data: typing.Optional[typing.List[bytes]],\n ) -> bytes:\n if associated_data is None:\n associated_data = []\n\n self._check_params(data, associated_data)\n\n return aead._decrypt(backend, self, b\"\", data, associated_data, 16)\n\n def _check_params(\n self,\n data: bytes,\n associated_data: typing.List,\n ) -> None:\n utils._check_bytes(\"data\", data)\n if len(data) == 0:\n raise ValueError(\"data must not be zero length\")\n if not isinstance(associated_data, list) or not all(\n isinstance(x, bytes) for x in associated_data\n ):\n raise TypeError(\"associated_data must be a list of bytes or None\")\n",
"path": "src/cryptography/hazmat/primitives/ciphers/aead.py"
}
] | diff --git a/src/cryptography/hazmat/primitives/ciphers/aead.py b/src/cryptography/hazmat/primitives/ciphers/aead.py
index b4564cfcc8bb..16c97a644440 100644
--- a/src/cryptography/hazmat/primitives/ciphers/aead.py
+++ b/src/cryptography/hazmat/primitives/ciphers/aead.py
@@ -366,6 +366,8 @@ def _check_params(
associated_data: typing.List,
) -> None:
utils._check_bytes("data", data)
+ if len(data) == 0:
+ raise ValueError("data must not be zero length")
if not isinstance(associated_data, list) or not all(
isinstance(x, bytes) for x in associated_data
):
diff --git a/tests/hazmat/primitives/test_aead.py b/tests/hazmat/primitives/test_aead.py
index dcbf76bd5f4e..b7a4aedf3ad6 100644
--- a/tests/hazmat/primitives/test_aead.py
+++ b/tests/hazmat/primitives/test_aead.py
@@ -625,7 +625,17 @@ def test_data_too_large(self):
aessiv.encrypt(FakeData(), None)
with pytest.raises(OverflowError):
- aessiv.encrypt(b"", [FakeData()])
+ aessiv.encrypt(b"irrelevant", [FakeData()])
+
+ def test_no_empty_encryption(self):
+ key = AESSIV.generate_key(256)
+ aessiv = AESSIV(key)
+
+ with pytest.raises(ValueError):
+ aessiv.encrypt(b"", None)
+
+ with pytest.raises(ValueError):
+ aessiv.decrypt(b"", None)
def test_vectors(self, backend, subtests):
vectors = load_vectors_from_file(
| AESSIV Encryption/Decryption fails if empty data is passed
## Issue description
If an empty byte string is passed to the `data` parameter of the `encrypt` or `decrypt` methods of `AESSIV`, the operation fails with an `InternalError`.
## Steps to reproduce the bug
```python
from cryptography.hazmat.primitives.ciphers import aead
key = bytes(32)
data = b""
cipher = aead.AESSIV(key)
output = cipher.encrypt(data, None) # raises `InternalError`
```
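With the fix from this pull request applied, the same call should be rejected up front with a `ValueError` instead of reaching OpenSSL and surfacing an `InternalError`. A minimal check of that behaviour (assuming a patched build and an OpenSSL that supports AES-SIV):

```python
from cryptography.hazmat.primitives.ciphers import aead

cipher = aead.AESSIV(bytes(32))  # all-zero 256-bit key, for illustration only
try:
    cipher.encrypt(b"", None)
except ValueError as exc:
    print(exc)  # "data must not be zero length"
```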
## cryptography installation
cryptography is installed via Poetry with the version constraint >=35.0.0:
```toml
[tool.poetry.dependencies]
python = "^3.8"
cryptography = ">=35.0.0"
```
## Required Version numbers
- `pip` - 22.2.2
- `cffi` - 1.15.1
- `cryptography` - 38.0.1
- `setuptools` - 65.3.0
|
open-mmlab__mmpose-295 | [
{
"content": "import copy as cp\nimport os\nimport os.path as osp\nfrom collections import OrderedDict\n\nimport json_tricks as json\nimport numpy as np\n\nfrom mmpose.datasets.builder import DATASETS\nfrom .topdown_base_dataset import TopDownBaseDataset\n\n\[email protected]_module()\nclass TopDownMpiiTrbDataset(TopDownBaseDataset):\n \"\"\"MPII-TRB Dataset dataset for top-down pose estimation.\n\n `TRB: A Novel Triplet Representation for Understanding 2D Human Body`\n ICCV'2019 More details can be found in the `paper\n <https://arxiv.org/abs/1910.11535>`__ .\n\n The dataset loads raw features and apply specified transforms\n to return a dict containing the image tensors and other information.\n\n Args:\n ann_file (str): Path to the annotation file.\n img_prefix (str): Path to a directory where images are held.\n Default: None.\n data_cfg (dict): config\n pipeline (list[dict | callable]): A sequence of data transforms.\n test_mode (bool): Store True when building test or\n validation dataset. Default: False.\n \"\"\"\n\n def __init__(self,\n ann_file,\n img_prefix,\n data_cfg,\n pipeline,\n test_mode=False):\n\n super().__init__(\n ann_file, img_prefix, data_cfg, pipeline, test_mode=test_mode)\n\n # flip_pairs in MPII-TRB\n self.ann_info['flip_pairs'] = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9],\n [10, 11], [14, 15]]\n for i in range(6):\n self.ann_info['flip_pairs'].append([16 + i, 22 + i])\n self.ann_info['flip_pairs'].append([28 + i, 34 + i])\n\n self.ann_info['upper_body_ids'] = [0, 1, 2, 3, 4, 5, 12, 13]\n self.ann_info['lower_body_ids'] = [6, 7, 8, 9, 10, 11]\n self.ann_info['upper_body_ids'].extend(list(range(14, 28)))\n self.ann_info['lower_body_ids'].extend(list(range(28, 40)))\n\n self.ann_info['use_different_joint_weights'] = False\n\n assert self.ann_info['num_joints'] == 40\n self.ann_info['joint_weights'] = np.ones(\n (self.ann_info['num_joints'], 1), dtype=np.float32)\n\n self.db = self._get_db(ann_file)\n self.image_set = set(x['image_file'] for x in self.db)\n self.num_images = len(self.image_set)\n\n print(f'=> num_images: {self.num_images}')\n print(f'=> load {len(self.db)} samples')\n\n def _get_db(self, ann_file):\n \"\"\"Load dataset.\"\"\"\n with open(ann_file, 'r') as f:\n data = json.load(f)\n tmpl = dict(\n image_file=None,\n center=None,\n scale=None,\n rotation=0,\n joints_3d=None,\n joints_3d_visible=None,\n dataset='mpii_trb')\n\n imid2info = {\n int(osp.splitext(x['file_name'])[0]): x\n for x in data['images']\n }\n\n num_joints = self.ann_info['num_joints']\n gt_db = []\n\n for anno in data['annotations']:\n newitem = cp.deepcopy(tmpl)\n image_id = anno['image_id']\n newitem['image_file'] = os.path.join(\n self.img_prefix, imid2info[image_id]['file_name'])\n\n if max(anno['keypoints']) == 0:\n continue\n\n joints_3d = np.zeros((num_joints, 3), dtype=np.float32)\n joints_3d_visible = np.zeros((num_joints, 3), dtype=np.float32)\n\n for ipt in range(num_joints):\n joints_3d[ipt, 0] = anno['keypoints'][ipt * 3 + 0]\n joints_3d[ipt, 1] = anno['keypoints'][ipt * 3 + 1]\n joints_3d[ipt, 2] = 0\n t_vis = min(anno['keypoints'][ipt * 3 + 2], 1)\n joints_3d_visible[ipt, :] = (t_vis, t_vis, 0)\n\n center = np.array(anno['center'], dtype=np.float32)\n scale = self.ann_info['image_size'] / anno['scale'] / 200.0\n newitem['center'] = center\n newitem['scale'] = scale\n newitem['joints_3d'] = joints_3d\n newitem['joints_3d_visible'] = joints_3d_visible\n if 'headbox' in anno:\n newitem['headbox'] = anno['headbox']\n gt_db.append(newitem)\n\n return gt_db\n\n def 
_evaluate_kernel(self, pred, joints_3d, joints_3d_visible, headbox):\n \"\"\"Evaluate one example.\"\"\"\n num_joints = self.ann_info['num_joints']\n headbox = np.array(headbox)\n threshold = np.linalg.norm(headbox[:2] - headbox[2:]) * 0.3\n hit = np.zeros(num_joints, dtype=np.float32)\n exist = np.zeros(num_joints, dtype=np.float32)\n\n for i in range(num_joints):\n pred_pt = pred[i]\n gt_pt = joints_3d[i]\n vis = joints_3d_visible[i][0]\n if vis:\n exist[i] = 1\n else:\n continue\n distance = np.linalg.norm(pred_pt[:2] - gt_pt[:2])\n if distance < threshold:\n hit[i] = 1\n return hit, exist\n\n def evaluate(self, outputs, res_folder, metric='PCKh', **kwargs):\n \"\"\"Evaluate PCKh for MPII-TRB dataset.\n\n Note:\n batch_size: N\n num_keypoints: K\n heatmap height: H\n heatmap width: W\n\n Args:\n outputs(list(preds, boxes, image_path, heatmap)):\n\n * preds(np.ndarray[1,K,3]): The first two dimensions are\n coordinates, score is the third dimension of the array.\n * boxes(np.ndarray[1,6]): [center[0], center[1], scale[0]\n , scale[1],area, score]\n * image_path(list[str]): For example, ['0', '0',\n '0', '0', '0', '1', '1', '6', '3', '.', 'j', 'p', 'g']\n * heatmap (np.ndarray[N, K, H, W]): model output heatmap.\n res_folder(str): Path of directory to save the results.\n metric (str | list[str]): Metrics to be performed.\n Defaults: 'PCKh'.\n\n Returns:\n dict: PCKh for each joint\n \"\"\"\n metrics = metric if isinstance(metric, list) else [metric]\n allowed_metrics = ['PCKh']\n for metric in metrics:\n if metric not in allowed_metrics:\n raise KeyError(f'metric {metric} is not supported')\n \"\"\"Evaluate MPII-TRB keypoint results.\"\"\"\n res_file = os.path.join(res_folder, 'result_keypoints.json')\n\n kpts = []\n\n for preds, boxes, image_path, _ in outputs:\n str_image_path = ''.join(image_path)\n image_id = int(osp.basename(osp.splitext(str_image_path)[0]))\n\n kpts.append({\n 'keypoints': preds[0].tolist(),\n 'center': boxes[0][0:2].tolist(),\n 'scale': boxes[0][2:4].tolist(),\n 'area': float(boxes[0][4]),\n 'score': float(boxes[0][5]),\n 'image_id': image_id,\n })\n\n self._write_keypoint_results(kpts, res_file)\n info_str = self._report_metric(res_file)\n name_value = OrderedDict(info_str)\n\n return name_value\n\n @staticmethod\n def _write_keypoint_results(keypoints, res_file):\n \"\"\"Write results into a json file.\"\"\"\n\n with open(res_file, 'w') as f:\n json.dump(keypoints, f, sort_keys=True, indent=4)\n\n def _report_metric(self, res_file):\n \"\"\"Keypoint evaluation.\n\n Report Mean Acc of skeleton, contour and all joints.\n \"\"\"\n num_joints = self.ann_info['num_joints']\n hit = np.zeros(num_joints, dtype=np.float32)\n exist = np.zeros(num_joints, dtype=np.float32)\n\n with open(res_file, 'r') as fin:\n preds = json.load(fin)\n\n assert len(preds) == len(\n self.db), f'len(preds)={len(preds)}, len(self.db)={len(self.db)}'\n for pred, item in zip(preds, self.db):\n h, e = self._evaluate_kernel(pred['keypoints'], item['joints_3d'],\n item['joints_3d_visible'],\n item['headbox'])\n hit += h\n exist += e\n skeleton = np.sum(hit[:14]) / np.sum(exist[:14])\n contour = np.sum(hit[14:]) / np.sum(exist[14:])\n mean = np.sum(hit) / np.sum(exist)\n\n info_str = []\n info_str.append(('Skeleton_acc', skeleton.item()))\n info_str.append(('Contour_acc', contour.item()))\n info_str.append(('PCKh', mean.item()))\n return info_str\n",
"path": "mmpose/datasets/datasets/top_down/topdown_mpii_trb_dataset.py"
}
] | [
{
"content": "import copy as cp\nimport os\nimport os.path as osp\nfrom collections import OrderedDict\n\nimport json_tricks as json\nimport numpy as np\n\nfrom mmpose.datasets.builder import DATASETS\nfrom .topdown_base_dataset import TopDownBaseDataset\n\n\[email protected]_module()\nclass TopDownMpiiTrbDataset(TopDownBaseDataset):\n \"\"\"MPII-TRB Dataset dataset for top-down pose estimation.\n\n `TRB: A Novel Triplet Representation for Understanding 2D Human Body`\n ICCV'2019 More details can be found in the `paper\n <https://arxiv.org/abs/1910.11535>`__ .\n\n The dataset loads raw features and apply specified transforms\n to return a dict containing the image tensors and other information.\n\n Args:\n ann_file (str): Path to the annotation file.\n img_prefix (str): Path to a directory where images are held.\n Default: None.\n data_cfg (dict): config\n pipeline (list[dict | callable]): A sequence of data transforms.\n test_mode (bool): Store True when building test or\n validation dataset. Default: False.\n \"\"\"\n\n def __init__(self,\n ann_file,\n img_prefix,\n data_cfg,\n pipeline,\n test_mode=False):\n\n super().__init__(\n ann_file, img_prefix, data_cfg, pipeline, test_mode=test_mode)\n\n # flip_pairs in MPII-TRB\n self.ann_info['flip_pairs'] = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9],\n [10, 11], [14, 15]]\n for i in range(6):\n self.ann_info['flip_pairs'].append([16 + i, 22 + i])\n self.ann_info['flip_pairs'].append([28 + i, 34 + i])\n\n self.ann_info['upper_body_ids'] = [0, 1, 2, 3, 4, 5, 12, 13]\n self.ann_info['lower_body_ids'] = [6, 7, 8, 9, 10, 11]\n self.ann_info['upper_body_ids'].extend(list(range(14, 28)))\n self.ann_info['lower_body_ids'].extend(list(range(28, 40)))\n\n self.ann_info['use_different_joint_weights'] = False\n\n assert self.ann_info['num_joints'] == 40\n self.ann_info['joint_weights'] = np.ones(\n (self.ann_info['num_joints'], 1), dtype=np.float32)\n\n self.db = self._get_db(ann_file)\n self.image_set = set(x['image_file'] for x in self.db)\n self.num_images = len(self.image_set)\n\n print(f'=> num_images: {self.num_images}')\n print(f'=> load {len(self.db)} samples')\n\n def _get_db(self, ann_file):\n \"\"\"Load dataset.\"\"\"\n with open(ann_file, 'r') as f:\n data = json.load(f)\n tmpl = dict(\n image_file=None,\n center=None,\n scale=None,\n rotation=0,\n joints_3d=None,\n joints_3d_visible=None,\n dataset='mpii_trb')\n\n imid2info = {\n int(osp.splitext(x['file_name'])[0]): x\n for x in data['images']\n }\n\n num_joints = self.ann_info['num_joints']\n gt_db = []\n\n for anno in data['annotations']:\n newitem = cp.deepcopy(tmpl)\n image_id = anno['image_id']\n newitem['image_file'] = os.path.join(\n self.img_prefix, imid2info[image_id]['file_name'])\n\n if max(anno['keypoints']) == 0:\n continue\n\n joints_3d = np.zeros((num_joints, 3), dtype=np.float32)\n joints_3d_visible = np.zeros((num_joints, 3), dtype=np.float32)\n\n for ipt in range(num_joints):\n joints_3d[ipt, 0] = anno['keypoints'][ipt * 3 + 0]\n joints_3d[ipt, 1] = anno['keypoints'][ipt * 3 + 1]\n joints_3d[ipt, 2] = 0\n t_vis = min(anno['keypoints'][ipt * 3 + 2], 1)\n joints_3d_visible[ipt, :] = (t_vis, t_vis, 0)\n\n center = np.array(anno['center'], dtype=np.float32)\n scale = self.ann_info['image_size'] / anno['scale'] / 200.0\n newitem['center'] = center\n newitem['scale'] = scale\n newitem['joints_3d'] = joints_3d\n newitem['joints_3d_visible'] = joints_3d_visible\n if 'headbox' in anno:\n newitem['headbox'] = anno['headbox']\n gt_db.append(newitem)\n\n return gt_db\n\n def 
_evaluate_kernel(self, pred, joints_3d, joints_3d_visible, headbox):\n \"\"\"Evaluate one example.\"\"\"\n num_joints = self.ann_info['num_joints']\n headbox = np.array(headbox)\n threshold = np.linalg.norm(headbox[:2] - headbox[2:]) * 0.3\n hit = np.zeros(num_joints, dtype=np.float32)\n exist = np.zeros(num_joints, dtype=np.float32)\n\n for i in range(num_joints):\n pred_pt = pred[i]\n gt_pt = joints_3d[i]\n vis = joints_3d_visible[i][0]\n if vis:\n exist[i] = 1\n else:\n continue\n distance = np.linalg.norm(pred_pt[:2] - gt_pt[:2])\n if distance < threshold:\n hit[i] = 1\n return hit, exist\n\n def evaluate(self, outputs, res_folder, metric='PCKh', **kwargs):\n \"\"\"Evaluate PCKh for MPII-TRB dataset.\n\n Note:\n batch_size: N\n num_keypoints: K\n heatmap height: H\n heatmap width: W\n\n Args:\n outputs(list(preds, boxes, image_path, heatmap)):\n\n * preds(np.ndarray[1,K,3]): The first two dimensions are\n coordinates, score is the third dimension of the array.\n * boxes(np.ndarray[1,6]): [center[0], center[1], scale[0]\n , scale[1],area, score]\n * image_path(list[str]): For example, ['0', '0',\n '0', '0', '0', '1', '1', '6', '3', '.', 'j', 'p', 'g']\n * heatmap (np.ndarray[N, K, H, W]): model output heatmap.\n res_folder(str): Path of directory to save the results.\n metric (str | list[str]): Metrics to be performed.\n Defaults: 'PCKh'.\n\n Returns:\n dict: PCKh for each joint\n \"\"\"\n metrics = metric if isinstance(metric, list) else [metric]\n allowed_metrics = ['PCKh']\n for metric in metrics:\n if metric not in allowed_metrics:\n raise KeyError(f'metric {metric} is not supported')\n\n res_file = os.path.join(res_folder, 'result_keypoints.json')\n\n kpts = []\n\n for preds, boxes, image_path, _ in outputs:\n str_image_path = ''.join(image_path)\n image_id = int(osp.basename(osp.splitext(str_image_path)[0]))\n\n kpts.append({\n 'keypoints': preds[0].tolist(),\n 'center': boxes[0][0:2].tolist(),\n 'scale': boxes[0][2:4].tolist(),\n 'area': float(boxes[0][4]),\n 'score': float(boxes[0][5]),\n 'image_id': image_id,\n })\n\n self._write_keypoint_results(kpts, res_file)\n info_str = self._report_metric(res_file)\n name_value = OrderedDict(info_str)\n\n return name_value\n\n @staticmethod\n def _write_keypoint_results(keypoints, res_file):\n \"\"\"Write results into a json file.\"\"\"\n\n with open(res_file, 'w') as f:\n json.dump(keypoints, f, sort_keys=True, indent=4)\n\n def _report_metric(self, res_file):\n \"\"\"Keypoint evaluation.\n\n Report Mean Acc of skeleton, contour and all joints.\n \"\"\"\n num_joints = self.ann_info['num_joints']\n hit = np.zeros(num_joints, dtype=np.float32)\n exist = np.zeros(num_joints, dtype=np.float32)\n\n with open(res_file, 'r') as fin:\n preds = json.load(fin)\n\n assert len(preds) == len(\n self.db), f'len(preds)={len(preds)}, len(self.db)={len(self.db)}'\n for pred, item in zip(preds, self.db):\n h, e = self._evaluate_kernel(pred['keypoints'], item['joints_3d'],\n item['joints_3d_visible'],\n item['headbox'])\n hit += h\n exist += e\n skeleton = np.sum(hit[:14]) / np.sum(exist[:14])\n contour = np.sum(hit[14:]) / np.sum(exist[14:])\n mean = np.sum(hit) / np.sum(exist)\n\n info_str = []\n info_str.append(('Skeleton_acc', skeleton.item()))\n info_str.append(('Contour_acc', contour.item()))\n info_str.append(('PCKh', mean.item()))\n return info_str\n",
"path": "mmpose/datasets/datasets/top_down/topdown_mpii_trb_dataset.py"
}
] | diff --git a/mmpose/datasets/datasets/top_down/topdown_mpii_trb_dataset.py b/mmpose/datasets/datasets/top_down/topdown_mpii_trb_dataset.py
index 58ef18e945..15122ba944 100644
--- a/mmpose/datasets/datasets/top_down/topdown_mpii_trb_dataset.py
+++ b/mmpose/datasets/datasets/top_down/topdown_mpii_trb_dataset.py
@@ -170,7 +170,7 @@ def evaluate(self, outputs, res_folder, metric='PCKh', **kwargs):
for metric in metrics:
if metric not in allowed_metrics:
raise KeyError(f'metric {metric} is not supported')
- """Evaluate MPII-TRB keypoint results."""
+
res_file = os.path.join(res_folder, 'result_keypoints.json')
kpts = []
| Pylint: W0105
```bash
mmpose/datasets/datasets/top_down/topdown_mpii_trb_dataset.py:173:8: W0105: String statement has no effect (pointless-string-statement)
```
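For context, W0105 flags a bare string literal used as a statement: the first string in a function body is its docstring and is fine, but a second one further down has no effect, which is why the fix simply deletes it. A minimal illustration (not mmpose code):

```python
def evaluate(results):
    """Collect the non-empty results."""  # real docstring: not flagged
    checked = [r for r in results if r]
    """Evaluate keypoint results."""  # W0105: pointless-string-statement
    return checked


print(evaluate(["ok", None, "fine"]))
```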
|
holoviz__holoviews-5452 | [
{
"content": "\"\"\"\nCollection of either extremely generic or simple Operation\nexamples.\n\"\"\"\nimport warnings\n\nimport numpy as np\nimport param\n\nfrom param import _is_number\n\nfrom ..core import (Operation, NdOverlay, Overlay, GridMatrix,\n HoloMap, Dataset, Element, Collator, Dimension)\nfrom ..core.data import ArrayInterface, DictInterface, default_datatype\nfrom ..core.data.util import dask_array_module\nfrom ..core.util import (\n LooseVersion, group_sanitizer, label_sanitizer, pd, datetime_types, isfinite,\n dt_to_int, isdatetime, is_dask_array, is_cupy_array, is_ibis_expr\n)\nfrom ..element.chart import Histogram, Scatter\nfrom ..element.raster import Image, RGB\nfrom ..element.path import Contours, Polygons\nfrom ..element.util import categorical_aggregate2d # noqa (API import)\nfrom ..streams import RangeXY\n\ncolumn_interfaces = [ArrayInterface, DictInterface]\nif pd:\n from ..core.data import PandasInterface\n column_interfaces.append(PandasInterface)\n\n\ndef identity(x,k): return x\n\nclass operation(Operation):\n \"\"\"\n The most generic operation that wraps any callable into an\n Operation. The callable needs to accept an HoloViews\n component and a key (that may be ignored) and must return a new\n HoloViews component.\n\n This class may be useful for turning a HoloViews method into an\n operation to define as compositor operation. For instance, the\n following definition:\n\n operation.instance(op=lambda x, k: x.collapse(np.subtract))\n\n Could be used to implement a collapse operation to subtracts the\n data between Rasters in an Overlay.\n \"\"\"\n\n output_type = param.Parameter(None, doc=\"\"\"\n The output element type which may be None to disable type\n checking.\n\n May be used to declare useful information to other code in\n HoloViews, e.g. required for tab-completion support of operations\n registered with compositors.\"\"\")\n\n group = param.String(default='Operation', doc=\"\"\"\n The group assigned to the result after having applied the\n operator.\"\"\")\n\n op = param.Callable(default=identity, doc=\"\"\"\n The operation used to generate a new HoloViews object returned\n by the operation. By default, the identity operation is\n applied.\"\"\")\n\n def _process(self, view, key=None):\n retval = self.p.op(view, key)\n if (self.p.output_type is not None):\n assert isinstance(retval, self.p.output_type), \\\n \"Return value does not match the declared output type.\"\n return retval.relabel(group=self.p.group)\n\n\nclass factory(Operation):\n \"\"\"\n Simple operation that constructs any element that accepts some\n other element as input. For instance, RGB and HSV elements can be\n created from overlays of Image elements.\n \"\"\"\n\n output_type = param.Parameter(RGB, doc=\"\"\"\n The output type of the factor operation.\n\n By default, if three overlaid Images elements are supplied,\n the corresponding RGB element will be returned. 
\"\"\")\n\n args = param.List(default=[], doc=\"\"\"\n The list of positional argument to pass to the factory\"\"\")\n\n kwargs = param.Dict(default={}, doc=\"\"\"\n The dict of keyword arguments to pass to the factory\"\"\")\n\n def _process(self, view, key=None):\n return self.p.output_type(view, *self.p.args, **self.p.kwargs)\n\n\nclass function(Operation):\n\n output_type = param.ClassSelector(class_=type, doc=\"\"\"\n The output type of the method operation\"\"\")\n\n input_type = param.ClassSelector(class_=type, doc=\"\"\"\n The object type the method is defined on\"\"\")\n\n fn = param.Callable(default=lambda el, *args, **kwargs: el, doc=\"\"\"\n The function to apply.\"\"\")\n\n args = param.List(default=[], doc=\"\"\"\n The list of positional argument to pass to the method\"\"\")\n\n kwargs = param.Dict(default={}, doc=\"\"\"\n The dict of keyword arguments to pass to the method\"\"\")\n\n def _process(self, element, key=None):\n return self.p.fn(element, *self.p.args, **self.p.kwargs)\n\n\nclass method(Operation):\n \"\"\"\n Operation that wraps a method call\n \"\"\"\n\n output_type = param.ClassSelector(class_=type, doc=\"\"\"\n The output type of the method operation\"\"\")\n\n input_type = param.ClassSelector(class_=type, doc=\"\"\"\n The object type the method is defined on\"\"\")\n\n method_name = param.String(default='__call__', doc=\"\"\"\n The method name\"\"\")\n\n args = param.List(default=[], doc=\"\"\"\n The list of positional argument to pass to the method\"\"\")\n\n kwargs = param.Dict(default={}, doc=\"\"\"\n The dict of keyword arguments to pass to the method\"\"\")\n\n def _process(self, element, key=None):\n fn = getattr(self.p.input_type, self.p.method_name)\n return fn(element, *self.p.args, **self.p.kwargs)\n\n\nclass apply_when(param.ParameterizedFunction):\n \"\"\"\n Applies a selection depending on the current zoom range. If the\n supplied predicate function returns a True it will apply the\n operation otherwise it will return the raw element after the\n selection. For example the following will apply datashading if\n the number of points in the current viewport exceed 1000 otherwise\n just returning the selected points element:\n\n apply_when(points, operation=datashade, predicate=lambda x: x > 1000)\n \"\"\"\n\n operation = param.Callable(default=lambda x: x)\n\n predicate = param.Callable(default=None)\n\n def _apply(self, element, x_range, y_range, invert=False):\n selected = element\n if x_range is not None and y_range is not None:\n selected = element[x_range, y_range]\n condition = self.predicate(selected)\n if (not invert and condition) or (invert and not condition):\n return selected\n elif selected.interface.gridded:\n return selected.clone([])\n else:\n return selected.iloc[:0]\n\n def __call__(self, obj, **params):\n if 'streams' in params:\n streams = params.pop('streams')\n else:\n streams = [RangeXY()]\n self.param.set_param(**params)\n if not self.predicate:\n raise ValueError(\n 'Must provide a predicate function to determine when '\n 'to apply the operation and when to return the selected '\n 'data.'\n )\n applied = self.operation(obj.apply(self._apply, streams=streams))\n raw = obj.apply(self._apply, streams=streams, invert=True)\n return applied * raw\n\n\nclass chain(Operation):\n \"\"\"\n Defining an Operation chain is an easy way to define a new\n Operation from a series of existing ones. 
The argument is a\n list of Operation (or Operation instances) that are\n called in sequence to generate the returned element.\n\n chain(operations=[gradient, threshold.instance(level=2)])\n\n This operation can accept an Image instance and would first\n compute the gradient before thresholding the result at a level of\n 2.0.\n\n Instances are only required when arguments need to be passed to\n individual operations so the resulting object is a function over a\n single argument.\n \"\"\"\n\n output_type = param.Parameter(Image, doc=\"\"\"\n The output type of the chain operation. Must be supplied if\n the chain is to be used as a channel operation.\"\"\")\n\n group = param.String(default='', doc=\"\"\"\n The group assigned to the result after having applied the chain.\n Defaults to the group produced by the last operation in the chain\"\"\")\n\n operations = param.List(default=[], class_=Operation, doc=\"\"\"\n A list of Operations (or Operation instances)\n that are applied on the input from left to right.\"\"\")\n\n def _process(self, view, key=None):\n processed = view\n for i, operation in enumerate(self.p.operations):\n processed = operation.process_element(\n processed, key, input_ranges=self.p.input_ranges\n )\n\n if not self.p.group:\n return processed\n else:\n return processed.clone(group=self.p.group)\n\n def find(self, operation, skip_nonlinked=True):\n \"\"\"\n Returns the first found occurrence of an operation while\n performing a backward traversal of the chain pipeline.\n \"\"\"\n found = None\n for op in self.operations[::-1]:\n if isinstance(op, operation):\n found = op\n break\n if not op.link_inputs and skip_nonlinked:\n break\n return found\n\n\nclass transform(Operation):\n \"\"\"\n Generic Operation to transform an input Image or RGBA\n element into an output Image. The transformation is defined by\n the supplied callable that accepts the data of the input Image\n (typically a numpy array) and returns the transformed data of the\n output Image.\n\n This operator is extremely versatile; for instance, you could\n implement an alternative to the explicit threshold operator with:\n\n operator=lambda x: np.clip(x, 0, 0.5)\n\n Alternatively, you can implement a transform computing the 2D\n autocorrelation using the scipy library with:\n\n operator=lambda x: scipy.signal.correlate2d(x, x)\n \"\"\"\n\n output_type = Image\n\n group = param.String(default='Transform', doc=\"\"\"\n The group assigned to the result after applying the\n transform.\"\"\")\n\n operator = param.Callable(doc=\"\"\"\n Function of one argument that transforms the data in the input\n Image to the data in the output Image. By default, acts as\n the identity function such that the output matches the input.\"\"\")\n\n def _process(self, img, key=None):\n processed = (img.data if not self.p.operator\n else self.p.operator(img.data))\n return img.clone(processed, group=self.p.group)\n\n\nclass image_overlay(Operation):\n \"\"\"\n Operation to build a overlay of images to a specification from a\n subset of the required elements.\n\n This is useful for reordering the elements of an overlay,\n duplicating layers of an overlay or creating blank image elements\n in the appropriate positions.\n\n For instance, image_overlay may build a three layered input\n suitable for the RGB factory operation even if supplied with one\n or two of the required channels (creating blank channels for the\n missing elements).\n\n Note that if there is any ambiguity regarding the match, the\n strongest match will be used. 
In the case of a tie in match\n strength, the first layer in the input is used. One successful\n match is always required.\n \"\"\"\n\n output_type = Overlay\n\n spec = param.String(doc=\"\"\"\n Specification of the output Overlay structure. For instance:\n\n Image.R * Image.G * Image.B\n\n Will ensure an overlay of this structure is created even if\n (for instance) only (Image.R * Image.B) is supplied.\n\n Elements in the input overlay that match are placed in the\n appropriate positions and unavailable specification elements\n are created with the specified fill group.\"\"\")\n\n fill = param.Number(default=0)\n\n default_range = param.Tuple(default=(0,1), doc=\"\"\"\n The default range that will be set on the value_dimension of\n any automatically created blank image elements.\"\"\")\n\n group = param.String(default='Transform', doc=\"\"\"\n The group assigned to the resulting overlay.\"\"\")\n\n\n @classmethod\n def _match(cls, el, spec):\n \"Return the strength of the match (None if no match)\"\n spec_dict = dict(zip(['type', 'group', 'label'], spec.split('.')))\n if not isinstance(el, Image) or spec_dict['type'] != 'Image':\n raise NotImplementedError(\"Only Image currently supported\")\n\n sanitizers = {'group':group_sanitizer, 'label':label_sanitizer}\n strength = 1\n for key in ['group', 'label']:\n attr_value = sanitizers[key](getattr(el, key))\n if key in spec_dict:\n if spec_dict[key] != attr_value: return None\n strength += 1\n return strength\n\n\n def _match_overlay(self, raster, overlay_spec):\n \"\"\"\n Given a raster or input overlay, generate a list of matched\n elements (None if no match) and corresponding tuple of match\n strength values.\n \"\"\"\n ordering = [None]*len(overlay_spec) # Elements to overlay\n strengths = [0]*len(overlay_spec) # Match strengths\n\n elements = raster.values() if isinstance(raster, Overlay) else [raster]\n\n for el in elements:\n for pos in range(len(overlay_spec)):\n strength = self._match(el, overlay_spec[pos])\n if strength is None: continue # No match\n elif (strength <= strengths[pos]): continue # Weaker match\n else: # Stronger match\n ordering[pos] = el\n strengths[pos] = strength\n return ordering, strengths\n\n\n def _process(self, raster, key=None):\n specs = tuple(el.strip() for el in self.p.spec.split('*'))\n ordering, strengths = self._match_overlay(raster, specs)\n if all(el is None for el in ordering):\n raise Exception(\"The image_overlay operation requires at least one match\")\n\n completed = []\n strongest = ordering[np.argmax(strengths)]\n for el, spec in zip(ordering, specs):\n if el is None:\n spec_dict = dict(zip(['type', 'group', 'label'], spec.split('.')))\n el = Image(np.ones(strongest.data.shape) * self.p.fill,\n group=spec_dict.get('group','Image'),\n label=spec_dict.get('label',''))\n el.vdims[0].range = self.p.default_range\n completed.append(el)\n return np.prod(completed)\n\n\n\nclass threshold(Operation):\n \"\"\"\n Threshold a given Image whereby all values higher than a given\n level map to the specified high value and all values lower than\n that level map to the specified low value.\n \"\"\"\n output_type = Image\n\n level = param.Number(default=0.5, doc=\"\"\"\n The value at which the threshold is applied. 
Values lower than\n the threshold map to the 'low' value and values above map to\n the 'high' value.\"\"\")\n\n high = param.Number(default=1.0, doc=\"\"\"\n The value given to elements greater than (or equal to) the\n threshold.\"\"\")\n\n low = param.Number(default=0.0, doc=\"\"\"\n The value given to elements below the threshold.\"\"\")\n\n group = param.String(default='Threshold', doc=\"\"\"\n The group assigned to the thresholded output.\"\"\")\n\n _per_element = True\n\n def _process(self, matrix, key=None):\n\n if not isinstance(matrix, Image):\n raise TypeError(\"The threshold operation requires a Image as input.\")\n\n arr = matrix.data\n high = np.ones(arr.shape) * self.p.high\n low = np.ones(arr.shape) * self.p.low\n thresholded = np.where(arr > self.p.level, high, low)\n\n return matrix.clone(thresholded, group=self.p.group)\n\n\n\nclass gradient(Operation):\n \"\"\"\n Compute the gradient plot of the supplied Image.\n\n If the Image value dimension is cyclic, the smallest step is taken\n considered the cyclic range\n \"\"\"\n\n output_type = Image\n\n group = param.String(default='Gradient', doc=\"\"\"\n The group assigned to the output gradient matrix.\"\"\")\n\n _per_element = True\n\n def _process(self, matrix, key=None):\n\n if len(matrix.vdims) != 1:\n raise ValueError(\"Input matrix to gradient operation must \"\n \"have single value dimension.\")\n\n matrix_dim = matrix.vdims[0]\n\n data = np.flipud(matrix.dimension_values(matrix_dim, flat=False))\n r, c = data.shape\n\n if matrix_dim.cyclic and (None in matrix_dim.range):\n raise Exception(\"Cyclic range must be specified to compute \"\n \"the gradient of cyclic quantities\")\n cyclic_range = None if not matrix_dim.cyclic else np.diff(matrix_dim.range)\n if cyclic_range is not None:\n # shift values such that wrapping works ok\n data = data - matrix_dim.range[0]\n\n dx = np.diff(data, 1, axis=1)[0:r-1, 0:c-1]\n dy = np.diff(data, 1, axis=0)[0:r-1, 0:c-1]\n\n if cyclic_range is not None: # Wrap into the specified range\n # Convert negative differences to an equivalent positive value\n dx = dx % cyclic_range\n dy = dy % cyclic_range\n #\n # Prefer small jumps\n dx_negatives = dx - cyclic_range\n dy_negatives = dy - cyclic_range\n dx = np.where(np.abs(dx_negatives)<dx, dx_negatives, dx)\n dy = np.where(np.abs(dy_negatives)<dy, dy_negatives, dy)\n\n return Image(np.sqrt(dx * dx + dy * dy), bounds=matrix.bounds, group=self.p.group)\n\n\n\nclass convolve(Operation):\n \"\"\"\n Apply a convolution to an overlay using the top layer as the\n kernel for convolving the bottom layer. Both Image elements in\n the input overlay should have a single value dimension.\n \"\"\"\n\n output_type = Image\n\n group = param.String(default='Convolution', doc=\"\"\"\n The group assigned to the convolved output.\"\"\")\n\n kernel_roi = param.NumericTuple(default=(0,0,0,0), length=4, doc=\"\"\"\n A 2-dimensional slice of the kernel layer to use in the\n convolution in lbrt (left, bottom, right, top) format. 
By\n default, no slicing is applied.\"\"\")\n\n _per_element = True\n\n def _process(self, overlay, key=None):\n if len(overlay) != 2:\n raise Exception(\"Overlay must contain at least to items.\")\n\n [target, kernel] = overlay.get(0), overlay.get(1)\n\n if len(target.vdims) != 1:\n raise Exception(\"Convolution requires inputs with single value dimensions.\")\n\n xslice = slice(self.p.kernel_roi[0], self.p.kernel_roi[2])\n yslice = slice(self.p.kernel_roi[1], self.p.kernel_roi[3])\n\n k = kernel.data if self.p.kernel_roi == (0,0,0,0) else kernel[xslice, yslice].data\n\n data = np.flipud(target.dimension_values(2, flat=False))\n fft1 = np.fft.fft2(data)\n fft2 = np.fft.fft2(k, s=data.shape)\n convolved_raw = np.fft.ifft2(fft1 * fft2).real\n\n k_rows, k_cols = k.shape\n rolled = np.roll(np.roll(convolved_raw, -(k_cols//2), axis=-1), -(k_rows//2), axis=-2)\n convolved = rolled / float(k.sum())\n\n return Image(convolved, bounds=target.bounds, group=self.p.group)\n\n\n\nclass contours(Operation):\n \"\"\"\n Given a Image with a single channel, annotate it with contour\n lines for a given set of contour levels.\n\n The return is an NdOverlay with a Contours layer for each given\n level, overlaid on top of the input Image.\n \"\"\"\n\n output_type = Overlay\n\n levels = param.ClassSelector(default=10, class_=(list, int), doc=\"\"\"\n A list of scalar values used to specify the contour levels.\"\"\")\n\n group = param.String(default='Level', doc=\"\"\"\n The group assigned to the output contours.\"\"\")\n\n filled = param.Boolean(default=False, doc=\"\"\"\n Whether to generate filled contours\"\"\")\n\n overlaid = param.Boolean(default=False, doc=\"\"\"\n Whether to overlay the contour on the supplied Element.\"\"\")\n\n _per_element = True\n\n def _process(self, element, key=None):\n try:\n from matplotlib.contour import QuadContourSet\n from matplotlib.axes import Axes\n from matplotlib.figure import Figure\n from matplotlib.dates import num2date, date2num\n except ImportError:\n raise ImportError(\"contours operation requires matplotlib.\")\n extent = element.range(0) + element.range(1)[::-1]\n\n xs = element.dimension_values(0, True, flat=False)\n ys = element.dimension_values(1, True, flat=False)\n zs = element.dimension_values(2, flat=False)\n\n # Ensure that coordinate arrays specify bin centers\n if xs.shape[0] != zs.shape[0]:\n xs = xs[:-1] + np.diff(xs, axis=0)/2.\n if xs.shape[1] != zs.shape[1]:\n xs = xs[:, :-1] + (np.diff(xs, axis=1)/2.)\n if ys.shape[0] != zs.shape[0]:\n ys = ys[:-1] + np.diff(ys, axis=0)/2.\n if ys.shape[1] != zs.shape[1]:\n ys = ys[:, :-1] + (np.diff(ys, axis=1)/2.)\n data = (xs, ys, zs)\n\n # if any data is a datetime, transform to matplotlib's numerical format\n data_is_datetime = tuple(isdatetime(arr) for k, arr in enumerate(data))\n if any(data_is_datetime):\n data = tuple(\n date2num(d) if is_datetime else d\n for d, is_datetime in zip(data, data_is_datetime)\n )\n\n xdim, ydim = element.dimensions('key', label=True)\n if self.p.filled:\n contour_type = Polygons\n else:\n contour_type = Contours\n vdims = element.vdims[:1]\n\n kwargs = {}\n levels = self.p.levels\n zmin, zmax = element.range(2)\n if isinstance(self.p.levels, int):\n if zmin == zmax:\n contours = contour_type([], [xdim, ydim], vdims)\n return (element * contours) if self.p.overlaid else contours\n data += (levels,)\n else:\n kwargs = {'levels': levels}\n\n fig = Figure()\n ax = Axes(fig, [0, 0, 1, 1])\n contour_set = QuadContourSet(ax, *data, filled=self.p.filled,\n extent=extent, 
**kwargs)\n levels = np.array(contour_set.get_array())\n crange = levels.min(), levels.max()\n if self.p.filled:\n levels = levels[:-1] + np.diff(levels)/2.\n vdims = [vdims[0].clone(range=crange)]\n\n paths = []\n empty = np.array([[np.nan, np.nan]])\n for level, cset in zip(levels, contour_set.collections):\n exteriors = []\n interiors = []\n for geom in cset.get_paths():\n interior = []\n polys = geom.to_polygons(closed_only=False)\n for ncp, cp in enumerate(polys):\n if any(data_is_datetime[0:2]):\n # transform x/y coordinates back to datetimes\n xs, ys = np.split(cp, 2, axis=1)\n if data_is_datetime[0]:\n xs = np.array(num2date(xs))\n if data_is_datetime[1]:\n ys = np.array(num2date(ys))\n cp = np.concatenate((xs, ys), axis=1)\n if ncp == 0:\n exteriors.append(cp)\n exteriors.append(empty)\n else:\n interior.append(cp)\n if len(polys):\n interiors.append(interior)\n if not exteriors:\n continue\n geom = {\n element.vdims[0].name:\n num2date(level) if data_is_datetime[2] else level,\n (xdim, ydim): np.concatenate(exteriors[:-1])\n }\n if self.p.filled and interiors:\n geom['holes'] = interiors\n paths.append(geom)\n contours = contour_type(paths, label=element.label, kdims=element.kdims, vdims=vdims)\n if self.p.overlaid:\n contours = element * contours\n return contours\n\n\nclass histogram(Operation):\n \"\"\"\n Returns a Histogram of the input element data, binned into\n num_bins over the bin_range (if specified) along the specified\n dimension.\n \"\"\"\n\n bin_range = param.NumericTuple(default=None, length=2, doc=\"\"\"\n Specifies the range within which to compute the bins.\"\"\")\n\n bins = param.ClassSelector(default=None, class_=(np.ndarray, list, tuple, str), doc=\"\"\"\n An explicit set of bin edges or a method to find the optimal\n set of bin edges, e.g. 'auto', 'fd', 'scott' etc. For more\n documentation on these approaches see the np.histogram_bin_edges\n documentation.\"\"\")\n\n cumulative = param.Boolean(default=False, doc=\"\"\"\n Whether to compute the cumulative histogram\"\"\")\n\n dimension = param.String(default=None, doc=\"\"\"\n Along which dimension of the Element to compute the histogram.\"\"\")\n\n frequency_label = param.String(default=None, doc=\"\"\"\n Format string defining the label of the frequency dimension of the Histogram.\"\"\")\n\n groupby = param.ClassSelector(default=None, class_=(str, Dimension), doc=\"\"\"\n Defines a dimension to group the Histogram returning an NdOverlay of Histograms.\"\"\")\n\n log = param.Boolean(default=False, doc=\"\"\"\n Whether to use base 10 logarithmic samples for the bin edges.\"\"\")\n\n mean_weighted = param.Boolean(default=False, doc=\"\"\"\n Whether the weighted frequencies are averaged.\"\"\")\n\n normed = param.ObjectSelector(default=False,\n objects=[True, False, 'integral', 'height'],\n doc=\"\"\"\n Controls normalization behavior. If `True` or `'integral'`, then\n `density=True` is passed to np.histogram, and the distribution\n is normalized such that the integral is unity. If `False`,\n then the frequencies will be raw counts. 
If `'height'`, then the\n frequencies are normalized such that the max bin height is unity.\"\"\")\n\n nonzero = param.Boolean(default=False, doc=\"\"\"\n Whether to use only nonzero values when computing the histogram\"\"\")\n\n num_bins = param.Integer(default=20, doc=\"\"\"\n Number of bins in the histogram .\"\"\")\n\n weight_dimension = param.String(default=None, doc=\"\"\"\n Name of the dimension the weighting should be drawn from\"\"\")\n\n style_prefix = param.String(default=None, allow_None=None, doc=\"\"\"\n Used for setting a common style for histograms in a HoloMap or AdjointLayout.\"\"\")\n\n def _process(self, element, key=None):\n if self.p.groupby:\n if not isinstance(element, Dataset):\n raise ValueError('Cannot use histogram groupby on non-Dataset Element')\n grouped = element.groupby(self.p.groupby, group_type=Dataset, container_type=NdOverlay)\n self.p.groupby = None\n return grouped.map(self._process, Dataset)\n\n normed = False if self.p.mean_weighted and self.p.weight_dimension else self.p.normed\n if self.p.dimension:\n selected_dim = self.p.dimension\n else:\n selected_dim = [d.name for d in element.vdims + element.kdims][0]\n dim = element.get_dimension(selected_dim)\n\n if hasattr(element, 'interface'):\n data = element.interface.values(element, selected_dim, compute=False)\n else:\n data = element.dimension_values(selected_dim)\n\n is_datetime = isdatetime(data)\n if is_datetime:\n data = data.astype('datetime64[ns]').astype('int64')\n\n # Handle different datatypes\n is_finite = isfinite\n is_cupy = is_cupy_array(data)\n if is_cupy:\n import cupy\n full_cupy_support = LooseVersion(cupy.__version__) > LooseVersion('8.0')\n if not full_cupy_support and (normed or self.p.weight_dimension):\n data = cupy.asnumpy(data)\n is_cupy = False\n else:\n is_finite = cupy.isfinite\n\n # Mask data\n if is_ibis_expr(data):\n mask = data.notnull()\n if self.p.nonzero:\n mask = mask & (data != 0)\n data = data.to_projection()\n data = data[mask]\n no_data = not len(data.head(1).execute())\n data = data[dim.name]\n else:\n mask = is_finite(data)\n if self.p.nonzero:\n mask = mask & (data != 0)\n data = data[mask]\n da = dask_array_module()\n no_data = False if da and isinstance(data, da.Array) else not len(data)\n\n # Compute weights\n if self.p.weight_dimension:\n if hasattr(element, 'interface'):\n weights = element.interface.values(element, self.p.weight_dimension, compute=False)\n else:\n weights = element.dimension_values(self.p.weight_dimension)\n weights = weights[mask]\n else:\n weights = None\n\n # Compute bins\n if isinstance(self.p.bins, str):\n bin_data = cupy.asnumpy(data) if is_cupy else data\n edges = np.histogram_bin_edges(bin_data, bins=self.p.bins)\n elif isinstance(self.p.bins, (list, np.ndarray)):\n edges = self.p.bins\n if isdatetime(edges):\n edges = edges.astype('datetime64[ns]').astype('int64')\n else:\n hist_range = self.p.bin_range or element.range(selected_dim)\n # Suppress a warning emitted by Numpy when datetime or timedelta scalars\n # are compared. See https://github.com/numpy/numpy/issues/10095 and\n # https://github.com/numpy/numpy/issues/9210. 
\n with warnings.catch_warnings():\n warnings.filterwarnings(\n action='ignore', message='elementwise comparison failed',\n category=DeprecationWarning\n )\n null_hist_range = hist_range == (0, 0)\n # Avoids range issues including zero bin range and empty bins\n if null_hist_range or any(not isfinite(r) for r in hist_range):\n hist_range = (0, 1)\n steps = self.p.num_bins + 1\n start, end = hist_range\n if is_datetime:\n start, end = dt_to_int(start, 'ns'), dt_to_int(end, 'ns')\n if self.p.log:\n bin_min = max([abs(start), data[data>0].min()])\n edges = np.logspace(np.log10(bin_min), np.log10(end), steps)\n else:\n edges = np.linspace(start, end, steps)\n if is_cupy:\n edges = cupy.asarray(edges)\n\n if not is_dask_array(data) and no_data:\n nbins = self.p.num_bins if self.p.bins is None else len(self.p.bins)-1\n hist = np.zeros(nbins)\n elif hasattr(element, 'interface'):\n density = True if normed else False\n hist, edges = element.interface.histogram(\n data, edges, density=density, weights=weights\n )\n if normed == 'height':\n hist /= hist.max()\n if self.p.weight_dimension and self.p.mean_weighted:\n hist_mean, _ = element.interface.histogram(\n data, density=False, bins=edges\n )\n hist /= hist_mean\n elif normed:\n # This covers True, 'height', 'integral'\n hist, edges = np.histogram(data, density=True,\n weights=weights, bins=edges)\n if normed == 'height':\n hist /= hist.max()\n else:\n hist, edges = np.histogram(data, normed=normed, weights=weights, bins=edges)\n if self.p.weight_dimension and self.p.mean_weighted:\n hist_mean, _ = np.histogram(data, density=False, bins=self.p.num_bins)\n hist /= hist_mean\n\n hist[np.isnan(hist)] = 0\n if is_datetime:\n edges = (edges/1e3).astype('datetime64[us]')\n\n params = {}\n if self.p.weight_dimension:\n params['vdims'] = [element.get_dimension(self.p.weight_dimension)]\n elif self.p.frequency_label:\n label = self.p.frequency_label.format(dim=dim.pprint_label)\n params['vdims'] = [Dimension('Frequency', label=label)]\n else:\n label = 'Frequency' if normed else 'Count'\n params['vdims'] = [Dimension('{0}_{1}'.format(dim.name, label.lower()),\n label=label)]\n\n if element.group != element.__class__.__name__:\n params['group'] = element.group\n\n if self.p.cumulative:\n hist = np.cumsum(hist)\n if self.p.normed in (True, 'integral'):\n hist *= edges[1]-edges[0]\n\n # Save off the computed bin edges so that if this operation instance\n # is used to compute another histogram, it will default to the same\n # bin edges.\n self.bins = list(edges)\n return Histogram((edges, hist), kdims=[element.get_dimension(selected_dim)],\n label=element.label, **params)\n\n\nclass decimate(Operation):\n \"\"\"\n Decimates any column based Element to a specified number of random\n rows if the current element defined by the x_range and y_range\n contains more than max_samples. 
By default the operation returns a\n DynamicMap with a RangeXY stream allowing dynamic downsampling.\n \"\"\"\n\n dynamic = param.Boolean(default=True, doc=\"\"\"\n Enables dynamic processing by default.\"\"\")\n\n link_inputs = param.Boolean(default=True, doc=\"\"\"\n By default, the link_inputs parameter is set to True so that\n when applying shade, backends that support linked streams\n update RangeXY streams on the inputs of the shade operation.\"\"\")\n\n max_samples = param.Integer(default=5000, doc=\"\"\"\n Maximum number of samples to display at the same time.\"\"\")\n\n random_seed = param.Integer(default=42, doc=\"\"\"\n Seed used to initialize randomization.\"\"\")\n\n streams = param.ClassSelector(default=[RangeXY], class_=(dict, list),\n doc=\"\"\"\n List of streams that are applied if dynamic=True, allowing\n for dynamic interaction with the plot.\"\"\")\n\n x_range = param.NumericTuple(default=None, length=2, doc=\"\"\"\n The x_range as a tuple of min and max x-value. Auto-ranges\n if set to None.\"\"\")\n\n y_range = param.NumericTuple(default=None, length=2, doc=\"\"\"\n The x_range as a tuple of min and max y-value. Auto-ranges\n if set to None.\"\"\")\n\n _per_element = True\n\n def _process_layer(self, element, key=None):\n if not isinstance(element, Dataset):\n raise ValueError(\"Cannot downsample non-Dataset types.\")\n if element.interface not in column_interfaces:\n element = element.clone(tuple(element.columns().values()))\n\n xstart, xend = self.p.x_range if self.p.x_range else element.range(0)\n ystart, yend = self.p.y_range if self.p.y_range else element.range(1)\n\n # Slice element to current ranges\n xdim, ydim = element.dimensions(label=True)[0:2]\n sliced = element.select(**{xdim: (xstart, xend),\n ydim: (ystart, yend)})\n\n if len(sliced) > self.p.max_samples:\n prng = np.random.RandomState(self.p.random_seed)\n return sliced.iloc[prng.choice(len(sliced), self.p.max_samples, False)]\n return sliced\n\n def _process(self, element, key=None):\n return element.map(self._process_layer, Element)\n\n\nclass interpolate_curve(Operation):\n \"\"\"\n Resamples a Curve using the defined interpolation method, e.g.\n to represent changes in y-values as steps.\n \"\"\"\n\n interpolation = param.ObjectSelector(objects=['steps-pre', 'steps-mid',\n 'steps-post', 'linear'],\n default='steps-mid', doc=\"\"\"\n Controls the transition point of the step along the x-axis.\"\"\")\n\n _per_element = True\n\n @classmethod\n def pts_to_prestep(cls, x, values):\n steps = np.zeros(2 * len(x) - 1)\n value_steps = tuple(np.empty(2 * len(x) - 1, dtype=v.dtype) for v in values)\n\n steps[0::2] = x\n steps[1::2] = steps[0:-2:2]\n\n val_arrays = []\n for v, s in zip(values, value_steps):\n s[0::2] = v\n s[1::2] = s[2::2]\n val_arrays.append(s)\n\n return steps, tuple(val_arrays)\n\n @classmethod\n def pts_to_midstep(cls, x, values):\n steps = np.zeros(2 * len(x))\n value_steps = tuple(np.empty(2 * len(x), dtype=v.dtype) for v in values)\n\n steps[1:-1:2] = steps[2::2] = x[:-1] + (x[1:] - x[:-1])/2\n steps[0], steps[-1] = x[0], x[-1]\n\n val_arrays = []\n for v, s in zip(values, value_steps):\n s[0::2] = v\n s[1::2] = s[0::2]\n val_arrays.append(s)\n\n return steps, tuple(val_arrays)\n\n @classmethod\n def pts_to_poststep(cls, x, values):\n steps = np.zeros(2 * len(x) - 1)\n value_steps = tuple(np.empty(2 * len(x) - 1, dtype=v.dtype) for v in values)\n\n steps[0::2] = x\n steps[1::2] = steps[2::2]\n\n val_arrays = []\n for v, s in zip(values, value_steps):\n s[0::2] = v\n s[1::2] = 
s[0:-2:2]\n val_arrays.append(s)\n\n return steps, tuple(val_arrays)\n\n def _process_layer(self, element, key=None):\n INTERPOLATE_FUNCS = {'steps-pre': self.pts_to_prestep,\n 'steps-mid': self.pts_to_midstep,\n 'steps-post': self.pts_to_poststep}\n if self.p.interpolation not in INTERPOLATE_FUNCS:\n return element\n x = element.dimension_values(0)\n is_datetime = isdatetime(x)\n if is_datetime:\n dt_type = 'datetime64[ns]'\n x = x.astype(dt_type)\n dvals = tuple(element.dimension_values(d) for d in element.dimensions()[1:])\n xs, dvals = INTERPOLATE_FUNCS[self.p.interpolation](x, dvals)\n if is_datetime:\n xs = xs.astype(dt_type)\n return element.clone((xs,)+dvals)\n\n def _process(self, element, key=None):\n return element.map(self._process_layer, Element)\n\n\n#==================#\n# Other operations #\n#==================#\n\n\nclass collapse(Operation):\n \"\"\"\n Given an overlay of Element types, collapse into single Element\n object using supplied function. Collapsing aggregates over the\n key dimensions of each object applying the supplied fn to each group.\n\n This is an example of an Operation that does not involve\n any Raster types.\n \"\"\"\n\n fn = param.Callable(default=np.mean, doc=\"\"\"\n The function that is used to collapse the curve y-values for\n each x-value.\"\"\")\n\n def _process(self, overlay, key=None):\n if isinstance(overlay, NdOverlay):\n collapse_map = HoloMap(overlay)\n else:\n collapse_map = HoloMap({i: el for i, el in enumerate(overlay)})\n return collapse_map.collapse(function=self.p.fn)\n\n\nclass gridmatrix(param.ParameterizedFunction):\n \"\"\"\n The gridmatrix operation takes an Element or HoloMap\n of Elements as input and creates a GridMatrix object,\n which plots each dimension in the Element against\n each other dimension. 
This provides a very useful\n overview of high-dimensional data and is inspired\n by pandas and seaborn scatter_matrix implementations.\n \"\"\"\n\n chart_type = param.Parameter(default=Scatter, doc=\"\"\"\n The Element type used to display bivariate distributions\n of the data.\"\"\")\n\n diagonal_type = param.Parameter(default=None, doc=\"\"\"\n The Element type along the diagonal, may be a Histogram or any\n other plot type which can visualize a univariate distribution.\n This parameter overrides diagonal_operation.\"\"\")\n\n diagonal_operation = param.Parameter(default=histogram, doc=\"\"\"\n The operation applied along the diagonal, may be a histogram-operation\n or any other function which returns a viewable element.\"\"\")\n\n overlay_dims = param.List(default=[], doc=\"\"\"\n If a HoloMap is supplied, this will allow overlaying one or\n more of its key dimensions.\"\"\")\n\n def __call__(self, data, **params):\n p = param.ParamOverrides(self, params)\n\n if isinstance(data, (HoloMap, NdOverlay)):\n ranges = {d.name: data.range(d) for d in data.dimensions()}\n data = data.clone({k: GridMatrix(self._process(p, v, ranges))\n for k, v in data.items()})\n data = Collator(data, merge_type=type(data))()\n if p.overlay_dims:\n data = data.map(lambda x: x.overlay(p.overlay_dims), (HoloMap,))\n return data\n elif isinstance(data, Element):\n data = self._process(p, data)\n return GridMatrix(data)\n\n\n def _process(self, p, element, ranges={}):\n # Creates a unified Dataset.data attribute\n # to draw the data from\n if isinstance(element.data, np.ndarray):\n el_data = element.table(default_datatype)\n else:\n el_data = element.data\n\n # Get dimensions to plot against each other\n types = (str, np.str_, np.object_)+datetime_types\n dims = [d for d in element.dimensions()\n if _is_number(element.range(d)[0]) and\n not issubclass(element.get_dimension_type(d), types)]\n permuted_dims = [(d1, d2) for d1 in dims\n for d2 in dims[::-1]]\n\n # Convert Histogram type to operation to avoid one case in the if below.\n if p.diagonal_type is Histogram:\n p.diagonal_type = None\n p.diagonal_operation = histogram\n\n data = {}\n for d1, d2 in permuted_dims:\n if d1 == d2:\n if p.diagonal_type is not None:\n if p.diagonal_type._auto_indexable_1d:\n el = p.diagonal_type(el_data, kdims=[d1], vdims=[d2],\n datatype=[default_datatype])\n else:\n values = element.dimension_values(d1)\n el = p.diagonal_type(values, kdims=[d1])\n elif p.diagonal_operation is None:\n continue\n elif p.diagonal_operation is histogram or isinstance(p.diagonal_operation, histogram):\n bin_range = ranges.get(d1.name, element.range(d1))\n el = p.diagonal_operation(element, dimension=d1.name, bin_range=bin_range)\n else:\n el = p.diagonal_operation(element, dimension=d1.name)\n else:\n kdims, vdims = ([d1, d2], []) if len(p.chart_type.kdims) == 2 else (d1, d2)\n el = p.chart_type(el_data, kdims=kdims, vdims=vdims,\n datatype=[default_datatype])\n data[(d1.name, d2.name)] = el\n return data\n",
"path": "holoviews/operation/element.py"
}
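Illustrative note on this record: the two copies of holoviews/operation/element.py in this entry differ in how decimate._process_layer indexes the randomly sampled rows. The first copy (above) passes the indices from prng.choice straight to .iloc, while the second copy (below) assigns them to choice and indexes with np.sort(choice). The following NumPy-only sketch of that sampling step is a minimal illustration, not HoloViews' actual code path; the data array and the parameter values mirroring max_samples and random_seed are hypothetical stand-ins.

import numpy as np

# Hypothetical stand-in for the sliced element data: 20000 rows of (x, y).
data = np.column_stack([np.linspace(0, 1, 20000), np.random.rand(20000)])

max_samples = 5000   # mirrors decimate.max_samples
random_seed = 42     # mirrors decimate.random_seed

prng = np.random.RandomState(random_seed)
if len(data) > max_samples:
    # Draw max_samples distinct row indices at random ...
    choice = prng.choice(len(data), max_samples, replace=False)
    # ... then sort them so the sampled rows keep their original order,
    # which can matter for order-dependent elements such as curves.
    decimated = data[np.sort(choice)]
else:
    decimated = data

The rows are still chosen uniformly at random; sorting the chosen indices only restores their original ordering, so the downsampling itself is unchanged.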
] | [
{
"content": "\"\"\"\nCollection of either extremely generic or simple Operation\nexamples.\n\"\"\"\nimport warnings\n\nimport numpy as np\nimport param\n\nfrom param import _is_number\n\nfrom ..core import (Operation, NdOverlay, Overlay, GridMatrix,\n HoloMap, Dataset, Element, Collator, Dimension)\nfrom ..core.data import ArrayInterface, DictInterface, default_datatype\nfrom ..core.data.util import dask_array_module\nfrom ..core.util import (\n LooseVersion, group_sanitizer, label_sanitizer, pd, datetime_types, isfinite,\n dt_to_int, isdatetime, is_dask_array, is_cupy_array, is_ibis_expr\n)\nfrom ..element.chart import Histogram, Scatter\nfrom ..element.raster import Image, RGB\nfrom ..element.path import Contours, Polygons\nfrom ..element.util import categorical_aggregate2d # noqa (API import)\nfrom ..streams import RangeXY\n\ncolumn_interfaces = [ArrayInterface, DictInterface]\nif pd:\n from ..core.data import PandasInterface\n column_interfaces.append(PandasInterface)\n\n\ndef identity(x,k): return x\n\nclass operation(Operation):\n \"\"\"\n The most generic operation that wraps any callable into an\n Operation. The callable needs to accept an HoloViews\n component and a key (that may be ignored) and must return a new\n HoloViews component.\n\n This class may be useful for turning a HoloViews method into an\n operation to define as compositor operation. For instance, the\n following definition:\n\n operation.instance(op=lambda x, k: x.collapse(np.subtract))\n\n Could be used to implement a collapse operation to subtracts the\n data between Rasters in an Overlay.\n \"\"\"\n\n output_type = param.Parameter(None, doc=\"\"\"\n The output element type which may be None to disable type\n checking.\n\n May be used to declare useful information to other code in\n HoloViews, e.g. required for tab-completion support of operations\n registered with compositors.\"\"\")\n\n group = param.String(default='Operation', doc=\"\"\"\n The group assigned to the result after having applied the\n operator.\"\"\")\n\n op = param.Callable(default=identity, doc=\"\"\"\n The operation used to generate a new HoloViews object returned\n by the operation. By default, the identity operation is\n applied.\"\"\")\n\n def _process(self, view, key=None):\n retval = self.p.op(view, key)\n if (self.p.output_type is not None):\n assert isinstance(retval, self.p.output_type), \\\n \"Return value does not match the declared output type.\"\n return retval.relabel(group=self.p.group)\n\n\nclass factory(Operation):\n \"\"\"\n Simple operation that constructs any element that accepts some\n other element as input. For instance, RGB and HSV elements can be\n created from overlays of Image elements.\n \"\"\"\n\n output_type = param.Parameter(RGB, doc=\"\"\"\n The output type of the factor operation.\n\n By default, if three overlaid Images elements are supplied,\n the corresponding RGB element will be returned. 
\"\"\")\n\n args = param.List(default=[], doc=\"\"\"\n The list of positional argument to pass to the factory\"\"\")\n\n kwargs = param.Dict(default={}, doc=\"\"\"\n The dict of keyword arguments to pass to the factory\"\"\")\n\n def _process(self, view, key=None):\n return self.p.output_type(view, *self.p.args, **self.p.kwargs)\n\n\nclass function(Operation):\n\n output_type = param.ClassSelector(class_=type, doc=\"\"\"\n The output type of the method operation\"\"\")\n\n input_type = param.ClassSelector(class_=type, doc=\"\"\"\n The object type the method is defined on\"\"\")\n\n fn = param.Callable(default=lambda el, *args, **kwargs: el, doc=\"\"\"\n The function to apply.\"\"\")\n\n args = param.List(default=[], doc=\"\"\"\n The list of positional argument to pass to the method\"\"\")\n\n kwargs = param.Dict(default={}, doc=\"\"\"\n The dict of keyword arguments to pass to the method\"\"\")\n\n def _process(self, element, key=None):\n return self.p.fn(element, *self.p.args, **self.p.kwargs)\n\n\nclass method(Operation):\n \"\"\"\n Operation that wraps a method call\n \"\"\"\n\n output_type = param.ClassSelector(class_=type, doc=\"\"\"\n The output type of the method operation\"\"\")\n\n input_type = param.ClassSelector(class_=type, doc=\"\"\"\n The object type the method is defined on\"\"\")\n\n method_name = param.String(default='__call__', doc=\"\"\"\n The method name\"\"\")\n\n args = param.List(default=[], doc=\"\"\"\n The list of positional argument to pass to the method\"\"\")\n\n kwargs = param.Dict(default={}, doc=\"\"\"\n The dict of keyword arguments to pass to the method\"\"\")\n\n def _process(self, element, key=None):\n fn = getattr(self.p.input_type, self.p.method_name)\n return fn(element, *self.p.args, **self.p.kwargs)\n\n\nclass apply_when(param.ParameterizedFunction):\n \"\"\"\n Applies a selection depending on the current zoom range. If the\n supplied predicate function returns a True it will apply the\n operation otherwise it will return the raw element after the\n selection. For example the following will apply datashading if\n the number of points in the current viewport exceed 1000 otherwise\n just returning the selected points element:\n\n apply_when(points, operation=datashade, predicate=lambda x: x > 1000)\n \"\"\"\n\n operation = param.Callable(default=lambda x: x)\n\n predicate = param.Callable(default=None)\n\n def _apply(self, element, x_range, y_range, invert=False):\n selected = element\n if x_range is not None and y_range is not None:\n selected = element[x_range, y_range]\n condition = self.predicate(selected)\n if (not invert and condition) or (invert and not condition):\n return selected\n elif selected.interface.gridded:\n return selected.clone([])\n else:\n return selected.iloc[:0]\n\n def __call__(self, obj, **params):\n if 'streams' in params:\n streams = params.pop('streams')\n else:\n streams = [RangeXY()]\n self.param.set_param(**params)\n if not self.predicate:\n raise ValueError(\n 'Must provide a predicate function to determine when '\n 'to apply the operation and when to return the selected '\n 'data.'\n )\n applied = self.operation(obj.apply(self._apply, streams=streams))\n raw = obj.apply(self._apply, streams=streams, invert=True)\n return applied * raw\n\n\nclass chain(Operation):\n \"\"\"\n Defining an Operation chain is an easy way to define a new\n Operation from a series of existing ones. 
The argument is a\n list of Operation (or Operation instances) that are\n called in sequence to generate the returned element.\n\n chain(operations=[gradient, threshold.instance(level=2)])\n\n This operation can accept an Image instance and would first\n compute the gradient before thresholding the result at a level of\n 2.0.\n\n Instances are only required when arguments need to be passed to\n individual operations so the resulting object is a function over a\n single argument.\n \"\"\"\n\n output_type = param.Parameter(Image, doc=\"\"\"\n The output type of the chain operation. Must be supplied if\n the chain is to be used as a channel operation.\"\"\")\n\n group = param.String(default='', doc=\"\"\"\n The group assigned to the result after having applied the chain.\n Defaults to the group produced by the last operation in the chain\"\"\")\n\n operations = param.List(default=[], class_=Operation, doc=\"\"\"\n A list of Operations (or Operation instances)\n that are applied on the input from left to right.\"\"\")\n\n def _process(self, view, key=None):\n processed = view\n for i, operation in enumerate(self.p.operations):\n processed = operation.process_element(\n processed, key, input_ranges=self.p.input_ranges\n )\n\n if not self.p.group:\n return processed\n else:\n return processed.clone(group=self.p.group)\n\n def find(self, operation, skip_nonlinked=True):\n \"\"\"\n Returns the first found occurrence of an operation while\n performing a backward traversal of the chain pipeline.\n \"\"\"\n found = None\n for op in self.operations[::-1]:\n if isinstance(op, operation):\n found = op\n break\n if not op.link_inputs and skip_nonlinked:\n break\n return found\n\n\nclass transform(Operation):\n \"\"\"\n Generic Operation to transform an input Image or RGBA\n element into an output Image. The transformation is defined by\n the supplied callable that accepts the data of the input Image\n (typically a numpy array) and returns the transformed data of the\n output Image.\n\n This operator is extremely versatile; for instance, you could\n implement an alternative to the explicit threshold operator with:\n\n operator=lambda x: np.clip(x, 0, 0.5)\n\n Alternatively, you can implement a transform computing the 2D\n autocorrelation using the scipy library with:\n\n operator=lambda x: scipy.signal.correlate2d(x, x)\n \"\"\"\n\n output_type = Image\n\n group = param.String(default='Transform', doc=\"\"\"\n The group assigned to the result after applying the\n transform.\"\"\")\n\n operator = param.Callable(doc=\"\"\"\n Function of one argument that transforms the data in the input\n Image to the data in the output Image. By default, acts as\n the identity function such that the output matches the input.\"\"\")\n\n def _process(self, img, key=None):\n processed = (img.data if not self.p.operator\n else self.p.operator(img.data))\n return img.clone(processed, group=self.p.group)\n\n\nclass image_overlay(Operation):\n \"\"\"\n Operation to build a overlay of images to a specification from a\n subset of the required elements.\n\n This is useful for reordering the elements of an overlay,\n duplicating layers of an overlay or creating blank image elements\n in the appropriate positions.\n\n For instance, image_overlay may build a three layered input\n suitable for the RGB factory operation even if supplied with one\n or two of the required channels (creating blank channels for the\n missing elements).\n\n Note that if there is any ambiguity regarding the match, the\n strongest match will be used. 
In the case of a tie in match\n strength, the first layer in the input is used. One successful\n match is always required.\n \"\"\"\n\n output_type = Overlay\n\n spec = param.String(doc=\"\"\"\n Specification of the output Overlay structure. For instance:\n\n Image.R * Image.G * Image.B\n\n Will ensure an overlay of this structure is created even if\n (for instance) only (Image.R * Image.B) is supplied.\n\n Elements in the input overlay that match are placed in the\n appropriate positions and unavailable specification elements\n are created with the specified fill group.\"\"\")\n\n fill = param.Number(default=0)\n\n default_range = param.Tuple(default=(0,1), doc=\"\"\"\n The default range that will be set on the value_dimension of\n any automatically created blank image elements.\"\"\")\n\n group = param.String(default='Transform', doc=\"\"\"\n The group assigned to the resulting overlay.\"\"\")\n\n\n @classmethod\n def _match(cls, el, spec):\n \"Return the strength of the match (None if no match)\"\n spec_dict = dict(zip(['type', 'group', 'label'], spec.split('.')))\n if not isinstance(el, Image) or spec_dict['type'] != 'Image':\n raise NotImplementedError(\"Only Image currently supported\")\n\n sanitizers = {'group':group_sanitizer, 'label':label_sanitizer}\n strength = 1\n for key in ['group', 'label']:\n attr_value = sanitizers[key](getattr(el, key))\n if key in spec_dict:\n if spec_dict[key] != attr_value: return None\n strength += 1\n return strength\n\n\n def _match_overlay(self, raster, overlay_spec):\n \"\"\"\n Given a raster or input overlay, generate a list of matched\n elements (None if no match) and corresponding tuple of match\n strength values.\n \"\"\"\n ordering = [None]*len(overlay_spec) # Elements to overlay\n strengths = [0]*len(overlay_spec) # Match strengths\n\n elements = raster.values() if isinstance(raster, Overlay) else [raster]\n\n for el in elements:\n for pos in range(len(overlay_spec)):\n strength = self._match(el, overlay_spec[pos])\n if strength is None: continue # No match\n elif (strength <= strengths[pos]): continue # Weaker match\n else: # Stronger match\n ordering[pos] = el\n strengths[pos] = strength\n return ordering, strengths\n\n\n def _process(self, raster, key=None):\n specs = tuple(el.strip() for el in self.p.spec.split('*'))\n ordering, strengths = self._match_overlay(raster, specs)\n if all(el is None for el in ordering):\n raise Exception(\"The image_overlay operation requires at least one match\")\n\n completed = []\n strongest = ordering[np.argmax(strengths)]\n for el, spec in zip(ordering, specs):\n if el is None:\n spec_dict = dict(zip(['type', 'group', 'label'], spec.split('.')))\n el = Image(np.ones(strongest.data.shape) * self.p.fill,\n group=spec_dict.get('group','Image'),\n label=spec_dict.get('label',''))\n el.vdims[0].range = self.p.default_range\n completed.append(el)\n return np.prod(completed)\n\n\n\nclass threshold(Operation):\n \"\"\"\n Threshold a given Image whereby all values higher than a given\n level map to the specified high value and all values lower than\n that level map to the specified low value.\n \"\"\"\n output_type = Image\n\n level = param.Number(default=0.5, doc=\"\"\"\n The value at which the threshold is applied. 
Values lower than\n the threshold map to the 'low' value and values above map to\n the 'high' value.\"\"\")\n\n high = param.Number(default=1.0, doc=\"\"\"\n The value given to elements greater than (or equal to) the\n threshold.\"\"\")\n\n low = param.Number(default=0.0, doc=\"\"\"\n The value given to elements below the threshold.\"\"\")\n\n group = param.String(default='Threshold', doc=\"\"\"\n The group assigned to the thresholded output.\"\"\")\n\n _per_element = True\n\n def _process(self, matrix, key=None):\n\n if not isinstance(matrix, Image):\n raise TypeError(\"The threshold operation requires a Image as input.\")\n\n arr = matrix.data\n high = np.ones(arr.shape) * self.p.high\n low = np.ones(arr.shape) * self.p.low\n thresholded = np.where(arr > self.p.level, high, low)\n\n return matrix.clone(thresholded, group=self.p.group)\n\n\n\nclass gradient(Operation):\n \"\"\"\n Compute the gradient plot of the supplied Image.\n\n If the Image value dimension is cyclic, the smallest step is taken\n considered the cyclic range\n \"\"\"\n\n output_type = Image\n\n group = param.String(default='Gradient', doc=\"\"\"\n The group assigned to the output gradient matrix.\"\"\")\n\n _per_element = True\n\n def _process(self, matrix, key=None):\n\n if len(matrix.vdims) != 1:\n raise ValueError(\"Input matrix to gradient operation must \"\n \"have single value dimension.\")\n\n matrix_dim = matrix.vdims[0]\n\n data = np.flipud(matrix.dimension_values(matrix_dim, flat=False))\n r, c = data.shape\n\n if matrix_dim.cyclic and (None in matrix_dim.range):\n raise Exception(\"Cyclic range must be specified to compute \"\n \"the gradient of cyclic quantities\")\n cyclic_range = None if not matrix_dim.cyclic else np.diff(matrix_dim.range)\n if cyclic_range is not None:\n # shift values such that wrapping works ok\n data = data - matrix_dim.range[0]\n\n dx = np.diff(data, 1, axis=1)[0:r-1, 0:c-1]\n dy = np.diff(data, 1, axis=0)[0:r-1, 0:c-1]\n\n if cyclic_range is not None: # Wrap into the specified range\n # Convert negative differences to an equivalent positive value\n dx = dx % cyclic_range\n dy = dy % cyclic_range\n #\n # Prefer small jumps\n dx_negatives = dx - cyclic_range\n dy_negatives = dy - cyclic_range\n dx = np.where(np.abs(dx_negatives)<dx, dx_negatives, dx)\n dy = np.where(np.abs(dy_negatives)<dy, dy_negatives, dy)\n\n return Image(np.sqrt(dx * dx + dy * dy), bounds=matrix.bounds, group=self.p.group)\n\n\n\nclass convolve(Operation):\n \"\"\"\n Apply a convolution to an overlay using the top layer as the\n kernel for convolving the bottom layer. Both Image elements in\n the input overlay should have a single value dimension.\n \"\"\"\n\n output_type = Image\n\n group = param.String(default='Convolution', doc=\"\"\"\n The group assigned to the convolved output.\"\"\")\n\n kernel_roi = param.NumericTuple(default=(0,0,0,0), length=4, doc=\"\"\"\n A 2-dimensional slice of the kernel layer to use in the\n convolution in lbrt (left, bottom, right, top) format. 
By\n default, no slicing is applied.\"\"\")\n\n _per_element = True\n\n def _process(self, overlay, key=None):\n if len(overlay) != 2:\n raise Exception(\"Overlay must contain at least to items.\")\n\n [target, kernel] = overlay.get(0), overlay.get(1)\n\n if len(target.vdims) != 1:\n raise Exception(\"Convolution requires inputs with single value dimensions.\")\n\n xslice = slice(self.p.kernel_roi[0], self.p.kernel_roi[2])\n yslice = slice(self.p.kernel_roi[1], self.p.kernel_roi[3])\n\n k = kernel.data if self.p.kernel_roi == (0,0,0,0) else kernel[xslice, yslice].data\n\n data = np.flipud(target.dimension_values(2, flat=False))\n fft1 = np.fft.fft2(data)\n fft2 = np.fft.fft2(k, s=data.shape)\n convolved_raw = np.fft.ifft2(fft1 * fft2).real\n\n k_rows, k_cols = k.shape\n rolled = np.roll(np.roll(convolved_raw, -(k_cols//2), axis=-1), -(k_rows//2), axis=-2)\n convolved = rolled / float(k.sum())\n\n return Image(convolved, bounds=target.bounds, group=self.p.group)\n\n\n\nclass contours(Operation):\n \"\"\"\n Given a Image with a single channel, annotate it with contour\n lines for a given set of contour levels.\n\n The return is an NdOverlay with a Contours layer for each given\n level, overlaid on top of the input Image.\n \"\"\"\n\n output_type = Overlay\n\n levels = param.ClassSelector(default=10, class_=(list, int), doc=\"\"\"\n A list of scalar values used to specify the contour levels.\"\"\")\n\n group = param.String(default='Level', doc=\"\"\"\n The group assigned to the output contours.\"\"\")\n\n filled = param.Boolean(default=False, doc=\"\"\"\n Whether to generate filled contours\"\"\")\n\n overlaid = param.Boolean(default=False, doc=\"\"\"\n Whether to overlay the contour on the supplied Element.\"\"\")\n\n _per_element = True\n\n def _process(self, element, key=None):\n try:\n from matplotlib.contour import QuadContourSet\n from matplotlib.axes import Axes\n from matplotlib.figure import Figure\n from matplotlib.dates import num2date, date2num\n except ImportError:\n raise ImportError(\"contours operation requires matplotlib.\")\n extent = element.range(0) + element.range(1)[::-1]\n\n xs = element.dimension_values(0, True, flat=False)\n ys = element.dimension_values(1, True, flat=False)\n zs = element.dimension_values(2, flat=False)\n\n # Ensure that coordinate arrays specify bin centers\n if xs.shape[0] != zs.shape[0]:\n xs = xs[:-1] + np.diff(xs, axis=0)/2.\n if xs.shape[1] != zs.shape[1]:\n xs = xs[:, :-1] + (np.diff(xs, axis=1)/2.)\n if ys.shape[0] != zs.shape[0]:\n ys = ys[:-1] + np.diff(ys, axis=0)/2.\n if ys.shape[1] != zs.shape[1]:\n ys = ys[:, :-1] + (np.diff(ys, axis=1)/2.)\n data = (xs, ys, zs)\n\n # if any data is a datetime, transform to matplotlib's numerical format\n data_is_datetime = tuple(isdatetime(arr) for k, arr in enumerate(data))\n if any(data_is_datetime):\n data = tuple(\n date2num(d) if is_datetime else d\n for d, is_datetime in zip(data, data_is_datetime)\n )\n\n xdim, ydim = element.dimensions('key', label=True)\n if self.p.filled:\n contour_type = Polygons\n else:\n contour_type = Contours\n vdims = element.vdims[:1]\n\n kwargs = {}\n levels = self.p.levels\n zmin, zmax = element.range(2)\n if isinstance(self.p.levels, int):\n if zmin == zmax:\n contours = contour_type([], [xdim, ydim], vdims)\n return (element * contours) if self.p.overlaid else contours\n data += (levels,)\n else:\n kwargs = {'levels': levels}\n\n fig = Figure()\n ax = Axes(fig, [0, 0, 1, 1])\n contour_set = QuadContourSet(ax, *data, filled=self.p.filled,\n extent=extent, 
**kwargs)\n levels = np.array(contour_set.get_array())\n crange = levels.min(), levels.max()\n if self.p.filled:\n levels = levels[:-1] + np.diff(levels)/2.\n vdims = [vdims[0].clone(range=crange)]\n\n paths = []\n empty = np.array([[np.nan, np.nan]])\n for level, cset in zip(levels, contour_set.collections):\n exteriors = []\n interiors = []\n for geom in cset.get_paths():\n interior = []\n polys = geom.to_polygons(closed_only=False)\n for ncp, cp in enumerate(polys):\n if any(data_is_datetime[0:2]):\n # transform x/y coordinates back to datetimes\n xs, ys = np.split(cp, 2, axis=1)\n if data_is_datetime[0]:\n xs = np.array(num2date(xs))\n if data_is_datetime[1]:\n ys = np.array(num2date(ys))\n cp = np.concatenate((xs, ys), axis=1)\n if ncp == 0:\n exteriors.append(cp)\n exteriors.append(empty)\n else:\n interior.append(cp)\n if len(polys):\n interiors.append(interior)\n if not exteriors:\n continue\n geom = {\n element.vdims[0].name:\n num2date(level) if data_is_datetime[2] else level,\n (xdim, ydim): np.concatenate(exteriors[:-1])\n }\n if self.p.filled and interiors:\n geom['holes'] = interiors\n paths.append(geom)\n contours = contour_type(paths, label=element.label, kdims=element.kdims, vdims=vdims)\n if self.p.overlaid:\n contours = element * contours\n return contours\n\n\nclass histogram(Operation):\n \"\"\"\n Returns a Histogram of the input element data, binned into\n num_bins over the bin_range (if specified) along the specified\n dimension.\n \"\"\"\n\n bin_range = param.NumericTuple(default=None, length=2, doc=\"\"\"\n Specifies the range within which to compute the bins.\"\"\")\n\n bins = param.ClassSelector(default=None, class_=(np.ndarray, list, tuple, str), doc=\"\"\"\n An explicit set of bin edges or a method to find the optimal\n set of bin edges, e.g. 'auto', 'fd', 'scott' etc. For more\n documentation on these approaches see the np.histogram_bin_edges\n documentation.\"\"\")\n\n cumulative = param.Boolean(default=False, doc=\"\"\"\n Whether to compute the cumulative histogram\"\"\")\n\n dimension = param.String(default=None, doc=\"\"\"\n Along which dimension of the Element to compute the histogram.\"\"\")\n\n frequency_label = param.String(default=None, doc=\"\"\"\n Format string defining the label of the frequency dimension of the Histogram.\"\"\")\n\n groupby = param.ClassSelector(default=None, class_=(str, Dimension), doc=\"\"\"\n Defines a dimension to group the Histogram returning an NdOverlay of Histograms.\"\"\")\n\n log = param.Boolean(default=False, doc=\"\"\"\n Whether to use base 10 logarithmic samples for the bin edges.\"\"\")\n\n mean_weighted = param.Boolean(default=False, doc=\"\"\"\n Whether the weighted frequencies are averaged.\"\"\")\n\n normed = param.ObjectSelector(default=False,\n objects=[True, False, 'integral', 'height'],\n doc=\"\"\"\n Controls normalization behavior. If `True` or `'integral'`, then\n `density=True` is passed to np.histogram, and the distribution\n is normalized such that the integral is unity. If `False`,\n then the frequencies will be raw counts. 
If `'height'`, then the\n frequencies are normalized such that the max bin height is unity.\"\"\")\n\n nonzero = param.Boolean(default=False, doc=\"\"\"\n Whether to use only nonzero values when computing the histogram\"\"\")\n\n num_bins = param.Integer(default=20, doc=\"\"\"\n Number of bins in the histogram .\"\"\")\n\n weight_dimension = param.String(default=None, doc=\"\"\"\n Name of the dimension the weighting should be drawn from\"\"\")\n\n style_prefix = param.String(default=None, allow_None=None, doc=\"\"\"\n Used for setting a common style for histograms in a HoloMap or AdjointLayout.\"\"\")\n\n def _process(self, element, key=None):\n if self.p.groupby:\n if not isinstance(element, Dataset):\n raise ValueError('Cannot use histogram groupby on non-Dataset Element')\n grouped = element.groupby(self.p.groupby, group_type=Dataset, container_type=NdOverlay)\n self.p.groupby = None\n return grouped.map(self._process, Dataset)\n\n normed = False if self.p.mean_weighted and self.p.weight_dimension else self.p.normed\n if self.p.dimension:\n selected_dim = self.p.dimension\n else:\n selected_dim = [d.name for d in element.vdims + element.kdims][0]\n dim = element.get_dimension(selected_dim)\n\n if hasattr(element, 'interface'):\n data = element.interface.values(element, selected_dim, compute=False)\n else:\n data = element.dimension_values(selected_dim)\n\n is_datetime = isdatetime(data)\n if is_datetime:\n data = data.astype('datetime64[ns]').astype('int64')\n\n # Handle different datatypes\n is_finite = isfinite\n is_cupy = is_cupy_array(data)\n if is_cupy:\n import cupy\n full_cupy_support = LooseVersion(cupy.__version__) > LooseVersion('8.0')\n if not full_cupy_support and (normed or self.p.weight_dimension):\n data = cupy.asnumpy(data)\n is_cupy = False\n else:\n is_finite = cupy.isfinite\n\n # Mask data\n if is_ibis_expr(data):\n mask = data.notnull()\n if self.p.nonzero:\n mask = mask & (data != 0)\n data = data.to_projection()\n data = data[mask]\n no_data = not len(data.head(1).execute())\n data = data[dim.name]\n else:\n mask = is_finite(data)\n if self.p.nonzero:\n mask = mask & (data != 0)\n data = data[mask]\n da = dask_array_module()\n no_data = False if da and isinstance(data, da.Array) else not len(data)\n\n # Compute weights\n if self.p.weight_dimension:\n if hasattr(element, 'interface'):\n weights = element.interface.values(element, self.p.weight_dimension, compute=False)\n else:\n weights = element.dimension_values(self.p.weight_dimension)\n weights = weights[mask]\n else:\n weights = None\n\n # Compute bins\n if isinstance(self.p.bins, str):\n bin_data = cupy.asnumpy(data) if is_cupy else data\n edges = np.histogram_bin_edges(bin_data, bins=self.p.bins)\n elif isinstance(self.p.bins, (list, np.ndarray)):\n edges = self.p.bins\n if isdatetime(edges):\n edges = edges.astype('datetime64[ns]').astype('int64')\n else:\n hist_range = self.p.bin_range or element.range(selected_dim)\n # Suppress a warning emitted by Numpy when datetime or timedelta scalars\n # are compared. See https://github.com/numpy/numpy/issues/10095 and\n # https://github.com/numpy/numpy/issues/9210. 
\n with warnings.catch_warnings():\n warnings.filterwarnings(\n action='ignore', message='elementwise comparison failed',\n category=DeprecationWarning\n )\n null_hist_range = hist_range == (0, 0)\n # Avoids range issues including zero bin range and empty bins\n if null_hist_range or any(not isfinite(r) for r in hist_range):\n hist_range = (0, 1)\n steps = self.p.num_bins + 1\n start, end = hist_range\n if is_datetime:\n start, end = dt_to_int(start, 'ns'), dt_to_int(end, 'ns')\n if self.p.log:\n bin_min = max([abs(start), data[data>0].min()])\n edges = np.logspace(np.log10(bin_min), np.log10(end), steps)\n else:\n edges = np.linspace(start, end, steps)\n if is_cupy:\n edges = cupy.asarray(edges)\n\n if not is_dask_array(data) and no_data:\n nbins = self.p.num_bins if self.p.bins is None else len(self.p.bins)-1\n hist = np.zeros(nbins)\n elif hasattr(element, 'interface'):\n density = True if normed else False\n hist, edges = element.interface.histogram(\n data, edges, density=density, weights=weights\n )\n if normed == 'height':\n hist /= hist.max()\n if self.p.weight_dimension and self.p.mean_weighted:\n hist_mean, _ = element.interface.histogram(\n data, density=False, bins=edges\n )\n hist /= hist_mean\n elif normed:\n # This covers True, 'height', 'integral'\n hist, edges = np.histogram(data, density=True,\n weights=weights, bins=edges)\n if normed == 'height':\n hist /= hist.max()\n else:\n hist, edges = np.histogram(data, normed=normed, weights=weights, bins=edges)\n if self.p.weight_dimension and self.p.mean_weighted:\n hist_mean, _ = np.histogram(data, density=False, bins=self.p.num_bins)\n hist /= hist_mean\n\n hist[np.isnan(hist)] = 0\n if is_datetime:\n edges = (edges/1e3).astype('datetime64[us]')\n\n params = {}\n if self.p.weight_dimension:\n params['vdims'] = [element.get_dimension(self.p.weight_dimension)]\n elif self.p.frequency_label:\n label = self.p.frequency_label.format(dim=dim.pprint_label)\n params['vdims'] = [Dimension('Frequency', label=label)]\n else:\n label = 'Frequency' if normed else 'Count'\n params['vdims'] = [Dimension('{0}_{1}'.format(dim.name, label.lower()),\n label=label)]\n\n if element.group != element.__class__.__name__:\n params['group'] = element.group\n\n if self.p.cumulative:\n hist = np.cumsum(hist)\n if self.p.normed in (True, 'integral'):\n hist *= edges[1]-edges[0]\n\n # Save off the computed bin edges so that if this operation instance\n # is used to compute another histogram, it will default to the same\n # bin edges.\n self.bins = list(edges)\n return Histogram((edges, hist), kdims=[element.get_dimension(selected_dim)],\n label=element.label, **params)\n\n\nclass decimate(Operation):\n \"\"\"\n Decimates any column based Element to a specified number of random\n rows if the current element defined by the x_range and y_range\n contains more than max_samples. 
By default the operation returns a\n DynamicMap with a RangeXY stream allowing dynamic downsampling.\n \"\"\"\n\n dynamic = param.Boolean(default=True, doc=\"\"\"\n Enables dynamic processing by default.\"\"\")\n\n link_inputs = param.Boolean(default=True, doc=\"\"\"\n By default, the link_inputs parameter is set to True so that\n when applying shade, backends that support linked streams\n update RangeXY streams on the inputs of the shade operation.\"\"\")\n\n max_samples = param.Integer(default=5000, doc=\"\"\"\n Maximum number of samples to display at the same time.\"\"\")\n\n random_seed = param.Integer(default=42, doc=\"\"\"\n Seed used to initialize randomization.\"\"\")\n\n streams = param.ClassSelector(default=[RangeXY], class_=(dict, list),\n doc=\"\"\"\n List of streams that are applied if dynamic=True, allowing\n for dynamic interaction with the plot.\"\"\")\n\n x_range = param.NumericTuple(default=None, length=2, doc=\"\"\"\n The x_range as a tuple of min and max x-value. Auto-ranges\n if set to None.\"\"\")\n\n y_range = param.NumericTuple(default=None, length=2, doc=\"\"\"\n The x_range as a tuple of min and max y-value. Auto-ranges\n if set to None.\"\"\")\n\n _per_element = True\n\n def _process_layer(self, element, key=None):\n if not isinstance(element, Dataset):\n raise ValueError(\"Cannot downsample non-Dataset types.\")\n if element.interface not in column_interfaces:\n element = element.clone(tuple(element.columns().values()))\n\n xstart, xend = self.p.x_range if self.p.x_range else element.range(0)\n ystart, yend = self.p.y_range if self.p.y_range else element.range(1)\n\n # Slice element to current ranges\n xdim, ydim = element.dimensions(label=True)[0:2]\n sliced = element.select(**{xdim: (xstart, xend),\n ydim: (ystart, yend)})\n\n if len(sliced) > self.p.max_samples:\n prng = np.random.RandomState(self.p.random_seed)\n choice = prng.choice(len(sliced), self.p.max_samples, False)\n return sliced.iloc[np.sort(choice)]\n return sliced\n\n def _process(self, element, key=None):\n return element.map(self._process_layer, Element)\n\n\nclass interpolate_curve(Operation):\n \"\"\"\n Resamples a Curve using the defined interpolation method, e.g.\n to represent changes in y-values as steps.\n \"\"\"\n\n interpolation = param.ObjectSelector(objects=['steps-pre', 'steps-mid',\n 'steps-post', 'linear'],\n default='steps-mid', doc=\"\"\"\n Controls the transition point of the step along the x-axis.\"\"\")\n\n _per_element = True\n\n @classmethod\n def pts_to_prestep(cls, x, values):\n steps = np.zeros(2 * len(x) - 1)\n value_steps = tuple(np.empty(2 * len(x) - 1, dtype=v.dtype) for v in values)\n\n steps[0::2] = x\n steps[1::2] = steps[0:-2:2]\n\n val_arrays = []\n for v, s in zip(values, value_steps):\n s[0::2] = v\n s[1::2] = s[2::2]\n val_arrays.append(s)\n\n return steps, tuple(val_arrays)\n\n @classmethod\n def pts_to_midstep(cls, x, values):\n steps = np.zeros(2 * len(x))\n value_steps = tuple(np.empty(2 * len(x), dtype=v.dtype) for v in values)\n\n steps[1:-1:2] = steps[2::2] = x[:-1] + (x[1:] - x[:-1])/2\n steps[0], steps[-1] = x[0], x[-1]\n\n val_arrays = []\n for v, s in zip(values, value_steps):\n s[0::2] = v\n s[1::2] = s[0::2]\n val_arrays.append(s)\n\n return steps, tuple(val_arrays)\n\n @classmethod\n def pts_to_poststep(cls, x, values):\n steps = np.zeros(2 * len(x) - 1)\n value_steps = tuple(np.empty(2 * len(x) - 1, dtype=v.dtype) for v in values)\n\n steps[0::2] = x\n steps[1::2] = steps[2::2]\n\n val_arrays = []\n for v, s in zip(values, value_steps):\n 
s[0::2] = v\n s[1::2] = s[0:-2:2]\n val_arrays.append(s)\n\n return steps, tuple(val_arrays)\n\n def _process_layer(self, element, key=None):\n INTERPOLATE_FUNCS = {'steps-pre': self.pts_to_prestep,\n 'steps-mid': self.pts_to_midstep,\n 'steps-post': self.pts_to_poststep}\n if self.p.interpolation not in INTERPOLATE_FUNCS:\n return element\n x = element.dimension_values(0)\n is_datetime = isdatetime(x)\n if is_datetime:\n dt_type = 'datetime64[ns]'\n x = x.astype(dt_type)\n dvals = tuple(element.dimension_values(d) for d in element.dimensions()[1:])\n xs, dvals = INTERPOLATE_FUNCS[self.p.interpolation](x, dvals)\n if is_datetime:\n xs = xs.astype(dt_type)\n return element.clone((xs,)+dvals)\n\n def _process(self, element, key=None):\n return element.map(self._process_layer, Element)\n\n\n#==================#\n# Other operations #\n#==================#\n\n\nclass collapse(Operation):\n \"\"\"\n Given an overlay of Element types, collapse into single Element\n object using supplied function. Collapsing aggregates over the\n key dimensions of each object applying the supplied fn to each group.\n\n This is an example of an Operation that does not involve\n any Raster types.\n \"\"\"\n\n fn = param.Callable(default=np.mean, doc=\"\"\"\n The function that is used to collapse the curve y-values for\n each x-value.\"\"\")\n\n def _process(self, overlay, key=None):\n if isinstance(overlay, NdOverlay):\n collapse_map = HoloMap(overlay)\n else:\n collapse_map = HoloMap({i: el for i, el in enumerate(overlay)})\n return collapse_map.collapse(function=self.p.fn)\n\n\nclass gridmatrix(param.ParameterizedFunction):\n \"\"\"\n The gridmatrix operation takes an Element or HoloMap\n of Elements as input and creates a GridMatrix object,\n which plots each dimension in the Element against\n each other dimension. 
This provides a very useful\n overview of high-dimensional data and is inspired\n by pandas and seaborn scatter_matrix implementations.\n \"\"\"\n\n chart_type = param.Parameter(default=Scatter, doc=\"\"\"\n The Element type used to display bivariate distributions\n of the data.\"\"\")\n\n diagonal_type = param.Parameter(default=None, doc=\"\"\"\n The Element type along the diagonal, may be a Histogram or any\n other plot type which can visualize a univariate distribution.\n This parameter overrides diagonal_operation.\"\"\")\n\n diagonal_operation = param.Parameter(default=histogram, doc=\"\"\"\n The operation applied along the diagonal, may be a histogram-operation\n or any other function which returns a viewable element.\"\"\")\n\n overlay_dims = param.List(default=[], doc=\"\"\"\n If a HoloMap is supplied, this will allow overlaying one or\n more of its key dimensions.\"\"\")\n\n def __call__(self, data, **params):\n p = param.ParamOverrides(self, params)\n\n if isinstance(data, (HoloMap, NdOverlay)):\n ranges = {d.name: data.range(d) for d in data.dimensions()}\n data = data.clone({k: GridMatrix(self._process(p, v, ranges))\n for k, v in data.items()})\n data = Collator(data, merge_type=type(data))()\n if p.overlay_dims:\n data = data.map(lambda x: x.overlay(p.overlay_dims), (HoloMap,))\n return data\n elif isinstance(data, Element):\n data = self._process(p, data)\n return GridMatrix(data)\n\n\n def _process(self, p, element, ranges={}):\n # Creates a unified Dataset.data attribute\n # to draw the data from\n if isinstance(element.data, np.ndarray):\n el_data = element.table(default_datatype)\n else:\n el_data = element.data\n\n # Get dimensions to plot against each other\n types = (str, np.str_, np.object_)+datetime_types\n dims = [d for d in element.dimensions()\n if _is_number(element.range(d)[0]) and\n not issubclass(element.get_dimension_type(d), types)]\n permuted_dims = [(d1, d2) for d1 in dims\n for d2 in dims[::-1]]\n\n # Convert Histogram type to operation to avoid one case in the if below.\n if p.diagonal_type is Histogram:\n p.diagonal_type = None\n p.diagonal_operation = histogram\n\n data = {}\n for d1, d2 in permuted_dims:\n if d1 == d2:\n if p.diagonal_type is not None:\n if p.diagonal_type._auto_indexable_1d:\n el = p.diagonal_type(el_data, kdims=[d1], vdims=[d2],\n datatype=[default_datatype])\n else:\n values = element.dimension_values(d1)\n el = p.diagonal_type(values, kdims=[d1])\n elif p.diagonal_operation is None:\n continue\n elif p.diagonal_operation is histogram or isinstance(p.diagonal_operation, histogram):\n bin_range = ranges.get(d1.name, element.range(d1))\n el = p.diagonal_operation(element, dimension=d1.name, bin_range=bin_range)\n else:\n el = p.diagonal_operation(element, dimension=d1.name)\n else:\n kdims, vdims = ([d1, d2], []) if len(p.chart_type.kdims) == 2 else (d1, d2)\n el = p.chart_type(el_data, kdims=kdims, vdims=vdims,\n datatype=[default_datatype])\n data[(d1.name, d2.name)] = el\n return data\n",
"path": "holoviews/operation/element.py"
}
] | diff --git a/holoviews/operation/element.py b/holoviews/operation/element.py
index 3c982fe1d7..d70e600f07 100644
--- a/holoviews/operation/element.py
+++ b/holoviews/operation/element.py
@@ -909,7 +909,8 @@ def _process_layer(self, element, key=None):
if len(sliced) > self.p.max_samples:
prng = np.random.RandomState(self.p.random_seed)
- return sliced.iloc[prng.choice(len(sliced), self.p.max_samples, False)]
+ choice = prng.choice(len(sliced), self.p.max_samples, False)
+ return sliced.iloc[np.sort(choice)]
return sliced
def _process(self, element, key=None):
 | Interpolated charts (Curve, Area, etc.) plot data in the provided order, but decimate returns data unsorted
#### ALL software version info
| Library | Version |
| -- | -- |
| python | 3.9.13 |
| holoviews | 1.15.0 |
| bokeh | 2.4.3 |
| pandas | 1.4.4 |
| numpy | 1.23.3 |
#### Description of expected behavior and the observed behavior
I should be able to decimate Curve/Area charts the same way I can decimate scatter charts. Decimating interpolated charts currently results in garbled output.
#### Complete, minimal, self-contained example code that reproduces the issue
```python
import numpy as np
import pandas as pd
import holoviews as hv
hv.extension('bokeh')
x = np.linspace(0, 10, 100)
y1 = np.sin(x)
y2 = np.cos(x)
table = hv.Table((x, y1, y2), 'x', ['y1', 'y2'])
hv.Area(table) # See Figure 1
# ---------------------------
from holoviews.operation import decimate
decimate(hv.Area(table), max_samples=50)  # See Figure 2
```
#### Screenshots or screencasts of the bug in action
Figure 1

Figure 2

|
interlegis__sapl-979 | [
{
"content": "import re\nfrom datetime import date\nfrom subprocess import PIPE, call\n\nimport pkg_resources\nimport reversion\nimport yaml\nfrom django.apps import apps\nfrom django.apps.config import AppConfig\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import OperationalError, ProgrammingError, connections, models\nfrom django.db.models import CharField, Max, ProtectedError, TextField\nfrom django.db.models.base import ModelBase\nfrom model_mommy import mommy\nfrom model_mommy.mommy import foreign_key_required, make\n\nfrom sapl.base.models import Autor, ProblemaMigracao\nfrom sapl.comissoes.models import Comissao, Composicao, Participacao\nfrom sapl.legacy.models import Protocolo as ProtocoloLegado\nfrom sapl.materia.models import (Proposicao, StatusTramitacao, TipoDocumento,\n TipoMateriaLegislativa, TipoProposicao,\n Tramitacao)\nfrom sapl.norma.models import (AssuntoNorma, NormaJuridica,\n TipoVinculoNormaJuridica)\nfrom sapl.parlamentares.models import Parlamentar\nfrom sapl.protocoloadm.models import Protocolo, StatusTramitacaoAdministrativo\nfrom sapl.sessao.models import ExpedienteMateria, OrdemDia, SessaoPlenaria\nfrom sapl.settings import PROJECT_DIR\nfrom sapl.utils import normalize\n\n# BASE ######################################################################\n# apps to be migrated, in app dependency order (very important)\nappconfs = [apps.get_app_config(n) for n in [\n 'parlamentares',\n 'comissoes',\n 'base',\n 'materia',\n 'norma',\n 'sessao',\n 'lexml',\n 'protocoloadm', ]]\n\nunique_constraints = []\none_to_one_constraints = []\nprimeira_vez = []\n\nname_sets = [set(m.__name__ for m in ac.get_models()) for ac in appconfs]\n\n# apps do not overlap\nfor s1 in name_sets:\n for s2 in name_sets:\n if s1 is not s2:\n assert not s1.intersection(s2)\n\n# apps include all legacy models\nlegacy_app = apps.get_app_config('legacy')\nlegacy_model_names = set(m.__name__ for m in legacy_app.get_models())\n\nmodel_dict = {m.__name__: m for ac in appconfs for m in ac.get_models()}\n\n\n# RENAMES ###################################################################\n\nMODEL_RENAME_PATTERN = re.compile('(.+) \\((.+)\\)')\n\n\ndef get_renames():\n field_renames = {}\n model_renames = {}\n for app in appconfs:\n app_rename_data = yaml.load(\n pkg_resources.resource_string(app.module.__name__, 'legacy.yaml'))\n for model_name, renames in app_rename_data.items():\n match = MODEL_RENAME_PATTERN.match(model_name)\n if match:\n model_name, old_name = match.groups()\n else:\n old_name = None\n model = getattr(app.models_module, model_name)\n if old_name:\n model_renames[model] = old_name\n field_renames[model] = renames\n\n # collect renames from parent classes\n for model, renames in field_renames.items():\n if any(parent in field_renames for parent in model.__mro__[1:]):\n renames = {}\n for parent in reversed(model.__mro__):\n if parent in field_renames:\n renames.update(field_renames[parent])\n field_renames[model] = renames\n\n # remove abstract classes\n field_renames = {m: r for m, r in field_renames.items()\n if not m._meta.abstract}\n\n return field_renames, model_renames\n\n# MIGRATION #################################################################\n\n\ndef info(msg):\n print('INFO: ' + msg)\n\n\ndef warn(msg):\n print('CUIDADO! 
' + msg)\n\n\ndef get_fk_related(field, value, label=None):\n if value is None and field.null is False:\n value = 0\n if value is not None:\n try:\n value = field.related_model.objects.get(id=value)\n except ObjectDoesNotExist:\n msg = 'FK [%s] não encontrada para valor %s ' \\\n '(em %s %s)' % (\n field.name, value,\n field.model.__name__, label or '---')\n if value == 0:\n if not field.null:\n fields_dict = get_fields_dict(field.related_model)\n # Cria stub ao final da tabela para evitar erros\n pk = 1\n if hasattr(field.related_model.objects.last(), 'pk'):\n pk = field.related_model.objects.last().pk\n with reversion.create_revision():\n reversion.set_comment('Stub criado pela migração')\n value = mommy.make(\n field.related_model, **fields_dict,\n pk=(pk + 1 or 1))\n descricao = 'stub criado para campos não nuláveis!'\n save_relation(value, [field.name], msg, descricao,\n eh_stub=True)\n warn(msg + ' => ' + descricao)\n else:\n value = None\n else:\n if field.model._meta.label == 'sessao.RegistroVotacao' and \\\n field.name == 'ordem':\n return value\n # Caso TipoProposicao não exista, um objeto será criado então\n # com content_type=13 (ProblemaMigracao)\n if field.related_model.__name__ == 'TipoProposicao':\n tipo = TipoProposicao.objects.filter(descricao='Erro')\n if not tipo:\n with reversion.create_revision():\n reversion.set_comment(\n 'TipoProposicao \"Erro\" criado')\n ct = ContentType.objects.get(pk=13)\n value = TipoProposicao.objects.create(\n id=value, descricao='Erro', content_type=ct)\n else:\n value = tipo[0]\n else:\n with reversion.create_revision():\n reversion.set_comment('Stub criado pela migração')\n value = make_stub(field.related_model, value)\n descricao = 'stub criado para entrada orfã!'\n warn(msg + ' => ' + descricao)\n save_relation(value, [field.name], msg, descricao,\n eh_stub=True)\n else:\n assert value\n return value\n\n\ndef get_field(model, fieldname):\n return model._meta.get_field(fieldname)\n\n\ndef exec_sql_file(path, db='default'):\n cursor = connections[db].cursor()\n for line in open(path):\n try:\n cursor.execute(line)\n except (OperationalError, ProgrammingError) as e:\n print(\"Args: '%s'\" % (str(e.args)))\n\n\ndef exec_sql(sql, db='default'):\n cursor = connections[db].cursor()\n cursor.execute(sql)\n return cursor\n\n\ndef iter_sql_records(sql, db):\n class Record:\n pass\n cursor = exec_sql(sql, db)\n fieldnames = [name[0] for name in cursor.description]\n for row in cursor.fetchall():\n record = Record()\n record.__dict__.update(zip(fieldnames, row))\n yield record\n\n\ndef delete_constraints(model):\n # pega nome da unique constraint dado o nome da tabela\n table = model._meta.db_table\n cursor = exec_sql(\"SELECT conname FROM pg_constraint WHERE conrelid = \"\n \"(SELECT oid FROM pg_class WHERE relname LIKE \"\n \"'%s') and contype = 'u';\" % (table))\n result = ()\n result = cursor.fetchall()\n # se existir um resultado, unique constraint será deletado\n for r in result:\n if r[0].endswith('key'):\n words_list = r[0].split('_')\n one_to_one_constraints.append([table, r[0], words_list, model])\n else:\n args = None\n args_list = []\n if model._meta.unique_together:\n args = model._meta.unique_together[0]\n args_list = list(args)\n unique_constraints.append([table, r[0], args_list, model])\n warn('Excluindo unique constraint de nome %s' % r[0])\n exec_sql(\"ALTER TABLE %s DROP CONSTRAINT %s;\" %\n (table, r[0]))\n\n\ndef recreate_constraints():\n if one_to_one_constraints:\n for constraint in one_to_one_constraints:\n table, name, 
args, model = constraint\n args_string = ''\n args_string = \"(\" + \"_\".join(map(str, args[2:-1])) + \")\"\n exec_sql(\"ALTER TABLE %s ADD CONSTRAINT %s UNIQUE %s;\" %\n (table, name, args_string))\n if unique_constraints:\n for constraint in unique_constraints:\n table, name, args, model = constraint\n for i in range(len(args)):\n if isinstance(model._meta.get_field(args[i]),\n models.ForeignKey):\n args[i] = args[i] + '_id'\n args_string = ''\n args_string += \"(\" + ', '.join(map(str, args)) + \")\"\n exec_sql(\"ALTER TABLE %s ADD CONSTRAINT %s UNIQUE %s;\" %\n (table, name, args_string))\n one_to_one_constraints.clear()\n unique_constraints.clear()\n\n\ndef stub_desnecessario(obj):\n lista_fields = [\n f for f in obj._meta.get_fields()\n if (f.one_to_many or f.one_to_one) and f.auto_created\n ]\n desnecessario = not any(\n rr.related_model.objects.filter(**{rr.field.name: obj}).exists()\n for rr in lista_fields)\n return desnecessario\n\n\ndef get_last_value(model):\n last_value = model.objects.all().aggregate(Max('pk'))\n return last_value['pk__max'] if last_value['pk__max'] else 0\n\n\ndef alter_sequence(model, id):\n sequence_name = '%s_id_seq' % model._meta.db_table\n exec_sql('ALTER SEQUENCE %s RESTART WITH %s;' % (sequence_name, id))\n\n\ndef save_with_id(new, id):\n last_value = get_last_value(type(new))\n alter_sequence(type(new), id)\n new.save()\n alter_sequence(type(new), last_value + 1)\n assert new.id == id, 'New id is different from provided!'\n\n\ndef save_relation(obj, nome_campo='', problema='', descricao='',\n eh_stub=False):\n link = ProblemaMigracao(\n content_object=obj, nome_campo=nome_campo, problema=problema,\n descricao=descricao, eh_stub=eh_stub,\n )\n link.save()\n\n\ndef make_stub(model, id):\n fields_dict = get_fields_dict(model)\n new = mommy.prepare(model, **fields_dict, pk=id)\n save_with_id(new, id)\n\n return new\n\n\ndef get_fields_dict(model):\n all_fields = model._meta.get_fields()\n fields_dict = {}\n fields_dict = {f.name: '????????????'[:f.max_length]\n for f in all_fields\n if isinstance(f, (CharField, TextField)) and\n not f.choices and not f.blank}\n return fields_dict\n\n\ndef fill_vinculo_norma_juridica():\n lista = [('A', 'Altera a norma'),\n ('R', 'Revoga integralmente a norma'),\n ('P', 'Revoga parcialmente a norma'),\n ('T', 'Revoga integralmente por consolidação'),\n ('C', 'Norma Correlata'),\n ('S', 'Ressalva a Norma'),\n ('E', 'Reedita a Norma'),\n ('I', 'Reedita a Norma com Alteração'),\n ('G', 'Regulamenta a Norma'),\n ('K', 'Suspende parcialmente a norma'),\n ('L', 'Suspende integralmente a norma'),\n ('N', 'Julgada integralmente inconstitucional'),\n ('O', 'Julgada parcialmente inconstitucional')]\n lista_objs = [TipoVinculoNormaJuridica(sigla=item[0], descricao=item[1])\n for item in lista]\n TipoVinculoNormaJuridica.objects.bulk_create(lista_objs)\n\n\nclass DataMigrator:\n\n def __init__(self):\n self.field_renames, self.model_renames = get_renames()\n self.data_mudada = {}\n self.choice_valida = {}\n\n def populate_renamed_fields(self, new, old):\n renames = self.field_renames[type(new)]\n\n for field in new._meta.fields:\n old_field_name = renames.get(field.name)\n field_type = field.get_internal_type()\n msg = (\"O valor do campo %s (%s) da model %s era inválido\" %\n (field.name, field_type, field.model.__name__))\n if old_field_name:\n old_value = getattr(old, old_field_name)\n if isinstance(field, models.ForeignKey):\n old_type = type(old) # not necessarily a model\n if hasattr(old_type, '_meta') and \\\n 
old_type._meta.pk.name != 'id':\n label = old.pk\n else:\n label = '-- SEM PK --'\n value = get_fk_related(field, old_value, label)\n else:\n value = getattr(old, old_field_name)\n if field_type == 'DateField' and \\\n not field.null and value is None:\n descricao = 'A data 1111-11-11 foi colocada no lugar'\n problema = 'O valor da data era nulo ou inválido'\n warn(msg +\n ' => ' + descricao)\n value = date(1111, 11, 11)\n self.data_mudada['obj'] = new\n self.data_mudada['descricao'] = descricao\n self.data_mudada['problema'] = problema\n self.data_mudada.setdefault('nome_campo', []).\\\n append(field.name)\n if field_type == 'CharField' or field_type == 'TextField':\n if value is None or value == 'None':\n value = ''\n if field.model._meta.label == 'sessao.RegistroVotacao' and \\\n field.name == 'ordem' and \\\n not isinstance(value, OrdemDia):\n try:\n new_value = ExpedienteMateria.objects.get(pk=value)\n setattr(new, 'expediente', new_value)\n setattr(new, field.name, None)\n continue\n except ObjectDoesNotExist:\n msg = 'FK [%s] não encontrada para valor %s ' \\\n '(em %s %s)' % (\n field.name, value,\n field.model.__name__, label or '---')\n with reversion.create_revision():\n value = make_stub(field.related_model, value)\n descricao = 'stub criado para entrada orfã!'\n warn(msg + ' => ' + descricao)\n save_relation(value, [field.name], msg, descricao,\n eh_stub=True)\n reversion.set_comment('Stub criado pela migração')\n setattr(new, field.name, value)\n elif field.model.__name__ == 'TipoAutor' and \\\n field.name == 'content_type':\n\n model = normalize(new.descricao.lower()).replace(' ', '')\n content_types = field.related_model.objects.filter(\n model=model).exclude(app_label='legacy')\n assert len(content_types) <= 1\n\n value = content_types[0] if content_types else None\n setattr(new, field.name, value)\n\n def migrate(self, obj=appconfs, interativo=True):\n # warning: model/app migration order is of utmost importance\n exec_sql_file(PROJECT_DIR.child(\n 'sapl', 'legacy', 'scripts', 'fix_tables.sql'), 'legacy')\n self.to_delete = []\n\n # excluindo database antigo.\n if interativo:\n info('Todos os dados do banco serão excluidos. '\n 'Recomendamos que faça backup do banco sapl '\n 'antes de continuar.')\n info('Deseja continuar? [s/n]')\n resposta = input()\n if resposta.lower() in ['s', 'sim', 'y', 'yes']:\n pass\n else:\n info('Migração cancelada.')\n return 0\n info('Excluindo entradas antigas do banco.')\n call([PROJECT_DIR.child('manage.py'), 'flush',\n '--settings=sapl.settings', '--database=default', '--no-input'],\n stdout=PIPE)\n\n info('Começando migração: %s...' 
% obj)\n self._do_migrate(obj)\n # exclude logically deleted in legacy base\n info('Deletando models com ind_excluido...')\n while self.to_delete:\n for obj in self.to_delete:\n try:\n obj.delete()\n self.to_delete.remove(obj)\n except ProtectedError:\n msg = 'A entrada de PK %s da model %s não pode ser ' \\\n 'excluida' % (obj.pk, obj._meta.model_name)\n descricao = 'Um ou mais objetos protegidos '\n warn(msg + ' => ' + descricao)\n save_relation(obj=obj, problema=msg,\n descricao=descricao, eh_stub=False)\n\n info('Deletando stubs desnecessários...')\n while self.delete_stubs():\n pass\n info('Recriando unique constraints...')\n # recreate_constraints()\n\n def _do_migrate(self, obj):\n if isinstance(obj, AppConfig):\n models_to_migrate = (model for model in obj.models.values()\n if model in self.field_renames)\n self._do_migrate(models_to_migrate)\n elif isinstance(obj, ModelBase):\n # A migração vai pular TipoProposicao e só vai migrar essa model\n # antes de migrar Proposicao. Isso deve acontecer por causa da\n # GenericRelation existente em TipoProposicao.\n if not obj.__name__ == 'TipoProposicao':\n if obj.__name__ == 'Proposicao':\n self.migrate_model(TipoProposicao)\n self.migrate_model(obj)\n elif hasattr(obj, '__iter__'):\n for item in obj:\n self._do_migrate(item)\n else:\n raise TypeError(\n 'Parameter must be a Model, AppConfig or a sequence of them')\n\n def migrate_model(self, model):\n print('Migrando %s...' % model.__name__)\n\n legacy_model_name = self.model_renames.get(model, model.__name__)\n legacy_model = legacy_app.get_model(legacy_model_name)\n legacy_pk_name = legacy_model._meta.pk.name\n\n # Clear all model entries\n # They may have been created in a previous migration attempt\n try:\n model.objects.all().delete()\n except ProtectedError:\n Proposicao.objects.all().delete()\n model.objects.all().delete()\n delete_constraints(model)\n\n # setup migration strategy for tables with or without a pk\n if legacy_pk_name == 'id':\n # There is no pk in the legacy table\n def save(new, old):\n with reversion.create_revision():\n new.save()\n reversion.set_comment('Objeto criado pela migração')\n old_records = iter_sql_records(\n 'select * from ' + legacy_model._meta.db_table, 'legacy')\n else:\n def save(new, old):\n with reversion.create_revision():\n save_with_id(new, getattr(old, legacy_pk_name))\n reversion.set_comment('Objeto criado pela migração')\n\n old_records = legacy_model.objects.all().order_by(legacy_pk_name)\n\n ajuste_antes_salvar = AJUSTE_ANTES_SALVAR.get(model)\n ajuste_depois_salvar = AJUSTE_DEPOIS_SALVAR.get(model)\n\n # convert old records to new ones\n for old in old_records:\n new = model()\n self.populate_renamed_fields(new, old)\n if ajuste_antes_salvar:\n ajuste_antes_salvar(new, old)\n save(new, old)\n if ajuste_depois_salvar:\n ajuste_depois_salvar(new, old)\n if self.data_mudada:\n with reversion.create_revision():\n save_relation(**self.data_mudada)\n self.data_mudada.clear()\n reversion.set_comment('Ajuste de data pela migração')\n if getattr(old, 'ind_excluido', False):\n self.to_delete.append(new)\n\n def delete_stubs(self):\n excluidos = 0\n for obj in ProblemaMigracao.objects.all():\n if obj.content_object and obj.eh_stub:\n original = obj.content_type.get_all_objects_for_this_type(\n id=obj.object_id)\n if stub_desnecessario(original[0]):\n qtd_exclusoes, *_ = original.delete()\n assert qtd_exclusoes == 1\n qtd_exclusoes, *_ = obj.delete()\n assert qtd_exclusoes == 1\n excluidos = excluidos + 1\n elif not obj.content_object and not 
obj.eh_stub:\n qtd_exclusoes, *_ = obj.delete()\n assert qtd_exclusoes == 1\n excluidos = excluidos + 1\n return excluidos\n\n\ndef migrate(obj=appconfs, interativo=True):\n dm = DataMigrator()\n dm.migrate(obj, interativo)\n\n\n# MIGRATION_ADJUSTMENTS #####################################################\n\ndef adjust_ordemdia(new, old):\n # Prestar atenção\n if not old.tip_votacao:\n new.tipo_votacao = 1\n\n\ndef adjust_parlamentar(new, old):\n if old.ind_unid_deliberativa:\n value = new.unidade_deliberativa\n # Field is defined as not null in legacy db,\n # but data includes null values\n # => transform None to False\n if value is None:\n warn('nulo convertido para falso')\n new.unidade_deliberativa = False\n\n\ndef adjust_participacao(new, old):\n composicao = Composicao()\n composicao.comissao, composicao.periodo = [\n get_fk_related(Composicao._meta.get_field(name), value)\n for name, value in (('comissao', old.cod_comissao),\n ('periodo', old.cod_periodo_comp))]\n # check if there is already an \"equal\" one in the db\n already_created = Composicao.objects.filter(\n comissao=composicao.comissao, periodo=composicao.periodo)\n if already_created:\n assert len(already_created) == 1 # we must never have made 2 copies\n [composicao] = already_created\n else:\n with reversion.create_revision():\n composicao.save()\n reversion.set_comment('Objeto criado pela migração')\n new.composicao = composicao\n\n\ndef adjust_protocolo(new, old):\n if new.numero is None and not primeira_vez:\n p = ProtocoloLegado.objects.filter(\n ano_protocolo=new.ano).aggregate(Max('num_protocolo'))\n numero_maximo = p['num_protocolo__max']\n new.numero = 1 if numero_maximo is None else numero_maximo + 1\n primeira_vez.append(True)\n if new.numero is None and primeira_vez:\n p = Protocolo.objects.filter(\n ano=new.ano).aggregate(Max('numero'))\n new.numero = p['numero__max'] + 1\n\n\ndef adjust_sessaoplenaria(new, old):\n assert not old.tip_expediente\n\n\ndef adjust_tipoproposicao(new, old):\n if old.ind_mat_ou_doc == 'M':\n new.tipo_conteudo_related = TipoMateriaLegislativa.objects.get(\n pk=old.tip_mat_ou_doc)\n elif old.ind_mat_ou_doc == 'D':\n new.tipo_conteudo_related = TipoDocumento.objects.get(\n pk=old.tip_mat_ou_doc)\n\n\ndef adjust_statustramitacao(new, old):\n if old.ind_fim_tramitacao:\n new.indicador = 'F'\n elif old.ind_retorno_tramitacao:\n new.indicador = 'R'\n else:\n new.indicador = ''\n\n\ndef adjust_statustramitacaoadm(new, old):\n adjust_statustramitacao(new, old)\n\n\ndef adjust_tramitacao(new, old):\n if old.sgl_turno == 'Ú':\n new.turno = 'U'\n\n\ndef adjust_normajuridica_antes_salvar(new, old):\n # Ajusta choice de esfera_federacao\n # O 'S' vem de 'Selecionar'. Na versão antiga do SAPL, quando uma opção do\n # combobox era selecionada, o sistema pegava a primeira letra da seleção,\n # sendo F para Federal, E para Estadual, M para Municipal e o S para\n # Selecionar, que era a primeira opção quando nada era selecionado.\n if old.tip_esfera_federacao == 'S':\n new.esfera_federacao = ''\n\n\ndef adjust_normajuridica_depois_salvar(new, old):\n # Ajusta relação M2M\n lista_pks_assunto = old.cod_assunto.split(',')\n for pk_assunto in lista_pks_assunto:\n new.assuntos.add(AssuntoNorma.objects.get(pk=pk_assunto))\n\n\ndef adjust_protocolo_depois_salvar(new, old):\n if old.num_protocolo is None:\n with reversion.create_revision():\n problema = 'Número do protocolo de PK %s é nulo' % new.pk\n descricao = 'Número do protocolo alterado para %s!' 
% new.numero\n warn(problema + ' => ' + descricao)\n save_relation(obj=new, problema=problema,\n descricao=descricao, eh_stub=False)\n reversion.set_comment('Numero de protocolo teve que ser alterado')\n\n\ndef adjust_autor(new, old):\n if old.cod_parlamentar:\n new.autor_related = Parlamentar.objects.get(pk=old.cod_parlamentar)\n new.nome = new.autor_related.nome_parlamentar\n elif old.cod_comissao:\n new.autor_related = Comissao.objects.get(pk=old.cod_comissao)\n\n if old.col_username:\n if not get_user_model().objects.filter(\n username=old.col_username).exists():\n user = get_user_model()(\n username=old.col_username, password=12345)\n with reversion.create_revision():\n user.save()\n reversion.set_comment('Objeto criado pela migração')\n new.user = user\n else:\n new.user = get_user_model().objects.filter(\n username=old.col_username)[0]\n\n\ndef adjust_comissao(new, old):\n if old.dat_extincao:\n if date.today() < new.data_extincao:\n new.ativa = True\n else:\n new.ativa = False\n if not old.dat_extincao:\n new.ativa = True\n\n\nAJUSTE_ANTES_SALVAR = {\n Autor: adjust_autor,\n Comissao: adjust_comissao,\n NormaJuridica: adjust_normajuridica_antes_salvar,\n OrdemDia: adjust_ordemdia,\n Parlamentar: adjust_parlamentar,\n Participacao: adjust_participacao,\n Protocolo: adjust_protocolo,\n SessaoPlenaria: adjust_sessaoplenaria,\n TipoProposicao: adjust_tipoproposicao,\n StatusTramitacao: adjust_statustramitacao,\n StatusTramitacaoAdministrativo: adjust_statustramitacaoadm,\n Tramitacao: adjust_tramitacao,\n}\n\nAJUSTE_DEPOIS_SALVAR = {\n NormaJuridica: adjust_normajuridica_depois_salvar,\n Protocolo: adjust_protocolo_depois_salvar,\n}\n\n# CHECKS ####################################################################\n\n\ndef get_ind_excluido(obj):\n legacy_model = legacy_app.get_model(type(obj).__name__)\n return getattr(legacy_model.objects.get(\n **{legacy_model._meta.pk.name: obj.id}), 'ind_excluido', False)\n\n\ndef check_app_no_ind_excluido(app):\n for model in app.models.values():\n assert not any(get_ind_excluido(obj) for obj in model.objects.all())\n print('OK!')\n\n# MOMMY MAKE WITH LOG ######################################################\n\n\ndef make_with_log(model, _quantity=None, make_m2m=False, **attrs):\n last_value = get_last_value(model)\n alter_sequence(model, last_value + 1)\n fields_dict = get_fields_dict(model)\n stub = make(model, _quantity, make_m2m, **fields_dict)\n problema = 'Um stub foi necessário durante a criação de um outro stub'\n descricao = 'Essa entrada é necessária para um dos stubs criados'\n ' anteriormente'\n warn(problema)\n save_relation(obj=stub, problema=problema,\n descricao=descricao, eh_stub=True)\n return stub\n\nmake_with_log.required = foreign_key_required\n",
"path": "sapl/legacy/migration.py"
}
] | [
{
"content": "import re\nfrom datetime import date\nfrom subprocess import PIPE, call\n\nimport pkg_resources\nimport reversion\nimport yaml\nfrom django.apps import apps\nfrom django.apps.config import AppConfig\nfrom django.contrib.auth import get_user_model\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.db import OperationalError, ProgrammingError, connections, models\nfrom django.db.models import CharField, Max, ProtectedError, TextField\nfrom django.db.models.base import ModelBase\nfrom model_mommy import mommy\nfrom model_mommy.mommy import foreign_key_required, make\n\nfrom sapl.base.models import Autor, ProblemaMigracao\nfrom sapl.comissoes.models import Comissao, Composicao, Participacao\nfrom sapl.legacy.models import Protocolo as ProtocoloLegado\nfrom sapl.materia.models import (Proposicao, StatusTramitacao, TipoDocumento,\n TipoMateriaLegislativa, TipoProposicao,\n Tramitacao)\nfrom sapl.norma.models import (AssuntoNorma, NormaJuridica,\n TipoVinculoNormaJuridica)\nfrom sapl.parlamentares.models import Parlamentar\nfrom sapl.protocoloadm.models import Protocolo, StatusTramitacaoAdministrativo\nfrom sapl.sessao.models import ExpedienteMateria, OrdemDia, SessaoPlenaria\nfrom sapl.settings import PROJECT_DIR\nfrom sapl.utils import normalize\n\n# BASE ######################################################################\n# apps to be migrated, in app dependency order (very important)\nappconfs = [apps.get_app_config(n) for n in [\n 'parlamentares',\n 'comissoes',\n 'base',\n 'materia',\n 'norma',\n 'sessao',\n 'lexml',\n 'protocoloadm', ]]\n\nunique_constraints = []\none_to_one_constraints = []\nprimeira_vez = []\n\nname_sets = [set(m.__name__ for m in ac.get_models()) for ac in appconfs]\n\n# apps do not overlap\nfor s1 in name_sets:\n for s2 in name_sets:\n if s1 is not s2:\n assert not s1.intersection(s2)\n\n# apps include all legacy models\nlegacy_app = apps.get_app_config('legacy')\nlegacy_model_names = set(m.__name__ for m in legacy_app.get_models())\n\nmodel_dict = {m.__name__: m for ac in appconfs for m in ac.get_models()}\n\n\n# RENAMES ###################################################################\n\nMODEL_RENAME_PATTERN = re.compile('(.+) \\((.+)\\)')\n\n\ndef get_renames():\n field_renames = {}\n model_renames = {}\n for app in appconfs:\n app_rename_data = yaml.load(\n pkg_resources.resource_string(app.module.__name__, 'legacy.yaml'))\n for model_name, renames in app_rename_data.items():\n match = MODEL_RENAME_PATTERN.match(model_name)\n if match:\n model_name, old_name = match.groups()\n else:\n old_name = None\n model = getattr(app.models_module, model_name)\n if old_name:\n model_renames[model] = old_name\n field_renames[model] = renames\n\n # collect renames from parent classes\n for model, renames in field_renames.items():\n if any(parent in field_renames for parent in model.__mro__[1:]):\n renames = {}\n for parent in reversed(model.__mro__):\n if parent in field_renames:\n renames.update(field_renames[parent])\n field_renames[model] = renames\n\n # remove abstract classes\n field_renames = {m: r for m, r in field_renames.items()\n if not m._meta.abstract}\n\n return field_renames, model_renames\n\n# MIGRATION #################################################################\n\n\ndef info(msg):\n print('INFO: ' + msg)\n\n\ndef warn(msg):\n print('CUIDADO! 
' + msg)\n\n\ndef get_fk_related(field, value, label=None):\n if value is None and field.null is False:\n value = 0\n if value is not None:\n try:\n value = field.related_model.objects.get(id=value)\n except ObjectDoesNotExist:\n msg = 'FK [%s] não encontrada para valor %s ' \\\n '(em %s %s)' % (\n field.name, value,\n field.model.__name__, label or '---')\n if value == 0:\n if not field.null:\n fields_dict = get_fields_dict(field.related_model)\n # Cria stub ao final da tabela para evitar erros\n pk = 1\n if hasattr(field.related_model.objects.last(), 'pk'):\n pk = field.related_model.objects.last().pk\n with reversion.create_revision():\n reversion.set_comment('Stub criado pela migração')\n value = mommy.make(\n field.related_model, **fields_dict,\n pk=(pk + 1 or 1))\n descricao = 'stub criado para campos não nuláveis!'\n save_relation(value, [field.name], msg, descricao,\n eh_stub=True)\n warn(msg + ' => ' + descricao)\n else:\n value = None\n else:\n if field.model._meta.label == 'sessao.RegistroVotacao' and \\\n field.name == 'ordem':\n return value\n # Caso TipoProposicao não exista, um objeto será criado então\n # com content_type=13 (ProblemaMigracao)\n if field.related_model.__name__ == 'TipoProposicao':\n tipo = TipoProposicao.objects.filter(descricao='Erro')\n if not tipo:\n with reversion.create_revision():\n reversion.set_comment(\n 'TipoProposicao \"Erro\" criado')\n ct = ContentType.objects.get(pk=13)\n value = TipoProposicao.objects.create(\n id=value, descricao='Erro', content_type=ct)\n else:\n value = tipo[0]\n else:\n with reversion.create_revision():\n reversion.set_comment('Stub criado pela migração')\n value = make_stub(field.related_model, value)\n descricao = 'stub criado para entrada orfã!'\n warn(msg + ' => ' + descricao)\n save_relation(value, [field.name], msg, descricao,\n eh_stub=True)\n else:\n assert value\n return value\n\n\ndef get_field(model, fieldname):\n return model._meta.get_field(fieldname)\n\n\ndef exec_sql_file(path, db='default'):\n cursor = connections[db].cursor()\n for line in open(path):\n try:\n cursor.execute(line)\n except (OperationalError, ProgrammingError) as e:\n print(\"Args: '%s'\" % (str(e.args)))\n\n\ndef exec_sql(sql, db='default'):\n cursor = connections[db].cursor()\n cursor.execute(sql)\n return cursor\n\n\ndef iter_sql_records(sql, db):\n class Record:\n pass\n cursor = exec_sql(sql, db)\n fieldnames = [name[0] for name in cursor.description]\n for row in cursor.fetchall():\n record = Record()\n record.__dict__.update(zip(fieldnames, row))\n yield record\n\n\ndef delete_constraints(model):\n # pega nome da unique constraint dado o nome da tabela\n table = model._meta.db_table\n cursor = exec_sql(\"SELECT conname FROM pg_constraint WHERE conrelid = \"\n \"(SELECT oid FROM pg_class WHERE relname LIKE \"\n \"'%s') and contype = 'u';\" % (table))\n result = ()\n result = cursor.fetchall()\n # se existir um resultado, unique constraint será deletado\n for r in result:\n if r[0].endswith('key'):\n words_list = r[0].split('_')\n one_to_one_constraints.append([table, r[0], words_list, model])\n else:\n args = None\n args_list = []\n if model._meta.unique_together:\n args = model._meta.unique_together[0]\n args_list = list(args)\n unique_constraints.append([table, r[0], args_list, model])\n warn('Excluindo unique constraint de nome %s' % r[0])\n exec_sql(\"ALTER TABLE %s DROP CONSTRAINT %s;\" %\n (table, r[0]))\n\n\ndef recreate_constraints():\n if one_to_one_constraints:\n for constraint in one_to_one_constraints:\n table, name, 
args, model = constraint\n args_string = ''\n args_string = \"(\" + \"_\".join(map(str, args[2:-1])) + \")\"\n exec_sql(\"ALTER TABLE %s ADD CONSTRAINT %s UNIQUE %s;\" %\n (table, name, args_string))\n if unique_constraints:\n for constraint in unique_constraints:\n table, name, args, model = constraint\n for i in range(len(args)):\n if isinstance(model._meta.get_field(args[i]),\n models.ForeignKey):\n args[i] = args[i] + '_id'\n args_string = ''\n args_string += \"(\" + ', '.join(map(str, args)) + \")\"\n exec_sql(\"ALTER TABLE %s ADD CONSTRAINT %s UNIQUE %s;\" %\n (table, name, args_string))\n one_to_one_constraints.clear()\n unique_constraints.clear()\n\n\ndef stub_desnecessario(obj):\n lista_fields = [\n f for f in obj._meta.get_fields()\n if (f.one_to_many or f.one_to_one) and f.auto_created\n ]\n desnecessario = not any(\n rr.related_model.objects.filter(**{rr.field.name: obj}).exists()\n for rr in lista_fields)\n return desnecessario\n\n\ndef get_last_value(model):\n last_value = model.objects.all().aggregate(Max('pk'))\n return last_value['pk__max'] if last_value['pk__max'] else 0\n\n\ndef alter_sequence(model, id):\n sequence_name = '%s_id_seq' % model._meta.db_table\n exec_sql('ALTER SEQUENCE %s RESTART WITH %s;' % (sequence_name, id))\n\n\ndef save_with_id(new, id):\n last_value = get_last_value(type(new))\n alter_sequence(type(new), id)\n new.save()\n alter_sequence(type(new), last_value + 1)\n assert new.id == id, 'New id is different from provided!'\n\n\ndef save_relation(obj, nome_campo='', problema='', descricao='',\n eh_stub=False):\n link = ProblemaMigracao(\n content_object=obj, nome_campo=nome_campo, problema=problema,\n descricao=descricao, eh_stub=eh_stub,\n )\n link.save()\n\n\ndef make_stub(model, id):\n fields_dict = get_fields_dict(model)\n new = mommy.prepare(model, **fields_dict, pk=id)\n save_with_id(new, id)\n\n return new\n\n\ndef get_fields_dict(model):\n all_fields = model._meta.get_fields()\n fields_dict = {}\n fields_dict = {f.name: '????????????'[:f.max_length]\n for f in all_fields\n if isinstance(f, (CharField, TextField)) and\n not f.choices and not f.blank}\n return fields_dict\n\n\ndef fill_vinculo_norma_juridica():\n lista = [('A', 'Altera a norma'),\n ('R', 'Revoga integralmente a norma'),\n ('P', 'Revoga parcialmente a norma'),\n ('T', 'Revoga integralmente por consolidação'),\n ('C', 'Norma Correlata'),\n ('S', 'Ressalva a Norma'),\n ('E', 'Reedita a Norma'),\n ('I', 'Reedita a Norma com Alteração'),\n ('G', 'Regulamenta a Norma'),\n ('K', 'Suspende parcialmente a norma'),\n ('L', 'Suspende integralmente a norma'),\n ('N', 'Julgada integralmente inconstitucional'),\n ('O', 'Julgada parcialmente inconstitucional')]\n lista_objs = [TipoVinculoNormaJuridica(sigla=item[0], descricao=item[1])\n for item in lista]\n TipoVinculoNormaJuridica.objects.bulk_create(lista_objs)\n\n\nclass DataMigrator:\n\n def __init__(self):\n self.field_renames, self.model_renames = get_renames()\n self.data_mudada = {}\n self.choice_valida = {}\n\n def populate_renamed_fields(self, new, old):\n renames = self.field_renames[type(new)]\n\n for field in new._meta.fields:\n old_field_name = renames.get(field.name)\n field_type = field.get_internal_type()\n msg = (\"O valor do campo %s (%s) da model %s era inválido\" %\n (field.name, field_type, field.model.__name__))\n if old_field_name:\n old_value = getattr(old, old_field_name)\n if isinstance(field, models.ForeignKey):\n old_type = type(old) # not necessarily a model\n if hasattr(old_type, '_meta') and \\\n 
old_type._meta.pk.name != 'id':\n label = old.pk\n else:\n label = '-- SEM PK --'\n value = get_fk_related(field, old_value, label)\n else:\n value = getattr(old, old_field_name)\n if field_type == 'DateField' and \\\n not field.null and value is None:\n descricao = 'A data 1111-11-11 foi colocada no lugar'\n problema = 'O valor da data era nulo ou inválido'\n warn(msg +\n ' => ' + descricao)\n value = date(1111, 11, 11)\n self.data_mudada['obj'] = new\n self.data_mudada['descricao'] = descricao\n self.data_mudada['problema'] = problema\n self.data_mudada.setdefault('nome_campo', []).\\\n append(field.name)\n if field_type == 'CharField' or field_type == 'TextField':\n if value is None or value == 'None':\n value = ''\n if field.model._meta.label == 'sessao.RegistroVotacao' and \\\n field.name == 'ordem' and \\\n not isinstance(value, OrdemDia):\n try:\n new_value = ExpedienteMateria.objects.get(pk=value)\n setattr(new, 'expediente', new_value)\n setattr(new, field.name, None)\n continue\n except ObjectDoesNotExist:\n msg = 'FK [%s] não encontrada para valor %s ' \\\n '(em %s %s)' % (\n field.name, value,\n field.model.__name__, label or '---')\n with reversion.create_revision():\n value = make_stub(field.related_model, value)\n descricao = 'stub criado para entrada orfã!'\n warn(msg + ' => ' + descricao)\n save_relation(value, [field.name], msg, descricao,\n eh_stub=True)\n reversion.set_comment('Stub criado pela migração')\n setattr(new, field.name, value)\n elif field.model.__name__ == 'TipoAutor' and \\\n field.name == 'content_type':\n\n model = normalize(new.descricao.lower()).replace(' ', '')\n content_types = field.related_model.objects.filter(\n model=model).exclude(app_label='legacy')\n assert len(content_types) <= 1\n\n value = content_types[0] if content_types else None\n setattr(new, field.name, value)\n\n def migrate(self, obj=appconfs, interativo=True):\n # warning: model/app migration order is of utmost importance\n exec_sql_file(PROJECT_DIR.child(\n 'sapl', 'legacy', 'scripts', 'fix_tables.sql'), 'legacy')\n self.to_delete = []\n\n # excluindo database antigo.\n if interativo:\n info('Todos os dados do banco serão excluidos. '\n 'Recomendamos que faça backup do banco sapl '\n 'antes de continuar.')\n info('Deseja continuar? [s/n]')\n resposta = input()\n if resposta.lower() in ['s', 'sim', 'y', 'yes']:\n pass\n else:\n info('Migração cancelada.')\n return 0\n info('Excluindo entradas antigas do banco.')\n call([PROJECT_DIR.child('manage.py'), 'flush',\n '--settings=sapl.settings', '--database=default', '--no-input'],\n stdout=PIPE)\n\n info('Começando migração: %s...' 
% obj)\n self._do_migrate(obj)\n # exclude logically deleted in legacy base\n info('Deletando models com ind_excluido...')\n while self.to_delete:\n for obj in self.to_delete:\n try:\n obj.delete()\n self.to_delete.remove(obj)\n except ProtectedError:\n msg = 'A entrada de PK %s da model %s não pode ser ' \\\n 'excluida' % (obj.pk, obj._meta.model_name)\n descricao = 'Um ou mais objetos protegidos '\n warn(msg + ' => ' + descricao)\n save_relation(obj=obj, problema=msg,\n descricao=descricao, eh_stub=False)\n\n info('Deletando stubs desnecessários...')\n while self.delete_stubs():\n pass\n info('Recriando unique constraints...')\n # recreate_constraints()\n\n def _do_migrate(self, obj):\n if isinstance(obj, AppConfig):\n models_to_migrate = (model for model in obj.models.values()\n if model in self.field_renames)\n self._do_migrate(models_to_migrate)\n elif isinstance(obj, ModelBase):\n # A migração vai pular TipoProposicao e só vai migrar essa model\n # antes de migrar Proposicao. Isso deve acontecer por causa da\n # GenericRelation existente em TipoProposicao.\n if not obj.__name__ == 'TipoProposicao':\n if obj.__name__ == 'Proposicao':\n self.migrate_model(TipoProposicao)\n self.migrate_model(obj)\n elif hasattr(obj, '__iter__'):\n for item in obj:\n self._do_migrate(item)\n else:\n raise TypeError(\n 'Parameter must be a Model, AppConfig or a sequence of them')\n\n def migrate_model(self, model):\n print('Migrando %s...' % model.__name__)\n\n legacy_model_name = self.model_renames.get(model, model.__name__)\n legacy_model = legacy_app.get_model(legacy_model_name)\n legacy_pk_name = legacy_model._meta.pk.name\n\n # Clear all model entries\n # They may have been created in a previous migration attempt\n try:\n model.objects.all().delete()\n except ProtectedError:\n Proposicao.objects.all().delete()\n model.objects.all().delete()\n delete_constraints(model)\n\n # setup migration strategy for tables with or without a pk\n if legacy_pk_name == 'id':\n # There is no pk in the legacy table\n def save(new, old):\n with reversion.create_revision():\n new.save()\n reversion.set_comment('Objeto criado pela migração')\n old_records = iter_sql_records(\n 'select * from ' + legacy_model._meta.db_table, 'legacy')\n else:\n def save(new, old):\n with reversion.create_revision():\n save_with_id(new, getattr(old, legacy_pk_name))\n reversion.set_comment('Objeto criado pela migração')\n\n old_records = legacy_model.objects.all().order_by(legacy_pk_name)\n\n ajuste_antes_salvar = AJUSTE_ANTES_SALVAR.get(model)\n ajuste_depois_salvar = AJUSTE_DEPOIS_SALVAR.get(model)\n\n # convert old records to new ones\n for old in old_records:\n new = model()\n self.populate_renamed_fields(new, old)\n if ajuste_antes_salvar:\n ajuste_antes_salvar(new, old)\n save(new, old)\n if ajuste_depois_salvar:\n ajuste_depois_salvar(new, old)\n if self.data_mudada:\n with reversion.create_revision():\n save_relation(**self.data_mudada)\n self.data_mudada.clear()\n reversion.set_comment('Ajuste de data pela migração')\n if getattr(old, 'ind_excluido', False):\n self.to_delete.append(new)\n\n def delete_stubs(self):\n excluidos = 0\n for obj in ProblemaMigracao.objects.all():\n if obj.content_object and obj.eh_stub:\n original = obj.content_type.get_all_objects_for_this_type(\n id=obj.object_id)\n if stub_desnecessario(original[0]):\n qtd_exclusoes, *_ = original.delete()\n assert qtd_exclusoes == 1\n qtd_exclusoes, *_ = obj.delete()\n assert qtd_exclusoes == 1\n excluidos = excluidos + 1\n elif not obj.content_object and not 
obj.eh_stub:\n qtd_exclusoes, *_ = obj.delete()\n assert qtd_exclusoes == 1\n excluidos = excluidos + 1\n return excluidos\n\n\ndef migrate(obj=appconfs, interativo=True):\n dm = DataMigrator()\n dm.migrate(obj, interativo)\n\n\n# MIGRATION_ADJUSTMENTS #####################################################\n\ndef adjust_ordemdia(new, old):\n # Prestar atenção\n if not old.tip_votacao:\n new.tipo_votacao = 1\n\n\ndef adjust_parlamentar(new, old):\n if old.ind_unid_deliberativa:\n value = new.unidade_deliberativa\n # Field is defined as not null in legacy db,\n # but data includes null values\n # => transform None to False\n if value is None:\n warn('nulo convertido para falso')\n new.unidade_deliberativa = False\n\n\ndef adjust_participacao(new, old):\n composicao = Composicao()\n composicao.comissao, composicao.periodo = [\n get_fk_related(Composicao._meta.get_field(name), value)\n for name, value in (('comissao', old.cod_comissao),\n ('periodo', old.cod_periodo_comp))]\n # check if there is already an \"equal\" one in the db\n already_created = Composicao.objects.filter(\n comissao=composicao.comissao, periodo=composicao.periodo)\n if already_created:\n assert len(already_created) == 1 # we must never have made 2 copies\n [composicao] = already_created\n else:\n with reversion.create_revision():\n composicao.save()\n reversion.set_comment('Objeto criado pela migração')\n new.composicao = composicao\n\n\ndef adjust_protocolo(new, old):\n if new.numero is None and not primeira_vez:\n p = ProtocoloLegado.objects.filter(\n ano_protocolo=new.ano).aggregate(Max('num_protocolo'))\n numero_maximo = p['num_protocolo__max']\n new.numero = 1 if numero_maximo is None else numero_maximo + 1\n primeira_vez.append(True)\n if new.numero is None and primeira_vez:\n p = Protocolo.objects.filter(\n ano=new.ano).aggregate(Max('numero'))\n new.numero = p['numero__max'] + 1\n\n\ndef adjust_sessaoplenaria(new, old):\n assert not old.tip_expediente\n\n\ndef adjust_tipoproposicao(new, old):\n if old.ind_mat_ou_doc == 'M':\n new.tipo_conteudo_related = TipoMateriaLegislativa.objects.get(\n pk=old.tip_mat_ou_doc)\n elif old.ind_mat_ou_doc == 'D':\n new.tipo_conteudo_related = TipoDocumento.objects.get(\n pk=old.tip_mat_ou_doc)\n\n\ndef adjust_statustramitacao(new, old):\n if old.ind_fim_tramitacao:\n new.indicador = 'F'\n elif old.ind_retorno_tramitacao:\n new.indicador = 'R'\n else:\n new.indicador = ''\n\n\ndef adjust_statustramitacaoadm(new, old):\n adjust_statustramitacao(new, old)\n\n\ndef adjust_tramitacao(new, old):\n if old.sgl_turno == 'Ú':\n new.turno = 'U'\n\n\ndef adjust_normajuridica_antes_salvar(new, old):\n # Ajusta choice de esfera_federacao\n # O 'S' vem de 'Selecionar'. 
Na versão antiga do SAPL, quando uma opção do\n # combobox era selecionada, o sistema pegava a primeira letra da seleção,\n # sendo F para Federal, E para Estadual, M para Municipal e o S para\n # Selecionar, que era a primeira opção quando nada era selecionado.\n if old.tip_esfera_federacao == 'S':\n new.esfera_federacao = ''\n\n\ndef adjust_normajuridica_depois_salvar(new, old):\n # Ajusta relação M2M\n lista_pks_assunto = old.cod_assunto.split(',')\n\n # list(filter(..)) usado para retirar strings vazias da lista\n for pk_assunto in list(filter(None, lista_pks_assunto)):\n new.assuntos.add(AssuntoNorma.objects.get(pk=pk_assunto))\n\n\ndef adjust_protocolo_depois_salvar(new, old):\n if old.num_protocolo is None:\n with reversion.create_revision():\n problema = 'Número do protocolo de PK %s é nulo' % new.pk\n descricao = 'Número do protocolo alterado para %s!' % new.numero\n warn(problema + ' => ' + descricao)\n save_relation(obj=new, problema=problema,\n descricao=descricao, eh_stub=False)\n reversion.set_comment('Numero de protocolo teve que ser alterado')\n\n\ndef adjust_autor(new, old):\n if old.cod_parlamentar:\n new.autor_related = Parlamentar.objects.get(pk=old.cod_parlamentar)\n new.nome = new.autor_related.nome_parlamentar\n elif old.cod_comissao:\n new.autor_related = Comissao.objects.get(pk=old.cod_comissao)\n\n if old.col_username:\n if not get_user_model().objects.filter(\n username=old.col_username).exists():\n user = get_user_model()(\n username=old.col_username, password=12345)\n with reversion.create_revision():\n user.save()\n reversion.set_comment('Objeto criado pela migração')\n new.user = user\n else:\n new.user = get_user_model().objects.filter(\n username=old.col_username)[0]\n\n\ndef adjust_comissao(new, old):\n if old.dat_extincao:\n if date.today() < new.data_extincao:\n new.ativa = True\n else:\n new.ativa = False\n if not old.dat_extincao:\n new.ativa = True\n\n\nAJUSTE_ANTES_SALVAR = {\n Autor: adjust_autor,\n Comissao: adjust_comissao,\n NormaJuridica: adjust_normajuridica_antes_salvar,\n OrdemDia: adjust_ordemdia,\n Parlamentar: adjust_parlamentar,\n Participacao: adjust_participacao,\n Protocolo: adjust_protocolo,\n SessaoPlenaria: adjust_sessaoplenaria,\n TipoProposicao: adjust_tipoproposicao,\n StatusTramitacao: adjust_statustramitacao,\n StatusTramitacaoAdministrativo: adjust_statustramitacaoadm,\n Tramitacao: adjust_tramitacao,\n}\n\nAJUSTE_DEPOIS_SALVAR = {\n NormaJuridica: adjust_normajuridica_depois_salvar,\n Protocolo: adjust_protocolo_depois_salvar,\n}\n\n# CHECKS ####################################################################\n\n\ndef get_ind_excluido(obj):\n legacy_model = legacy_app.get_model(type(obj).__name__)\n return getattr(legacy_model.objects.get(\n **{legacy_model._meta.pk.name: obj.id}), 'ind_excluido', False)\n\n\ndef check_app_no_ind_excluido(app):\n for model in app.models.values():\n assert not any(get_ind_excluido(obj) for obj in model.objects.all())\n print('OK!')\n\n# MOMMY MAKE WITH LOG ######################################################\n\n\ndef make_with_log(model, _quantity=None, make_m2m=False, **attrs):\n last_value = get_last_value(model)\n alter_sequence(model, last_value + 1)\n fields_dict = get_fields_dict(model)\n stub = make(model, _quantity, make_m2m, **fields_dict)\n problema = 'Um stub foi necessário durante a criação de um outro stub'\n descricao = 'Essa entrada é necessária para um dos stubs criados'\n ' anteriormente'\n warn(problema)\n save_relation(obj=stub, problema=problema,\n descricao=descricao, 
eh_stub=True)\n return stub\n\nmake_with_log.required = foreign_key_required\n",
"path": "sapl/legacy/migration.py"
}
] | diff --git a/sapl/legacy/migration.py b/sapl/legacy/migration.py
index 307eef716..379ac13ad 100644
--- a/sapl/legacy/migration.py
+++ b/sapl/legacy/migration.py
@@ -633,7 +633,9 @@ def adjust_normajuridica_antes_salvar(new, old):
def adjust_normajuridica_depois_salvar(new, old):
# Ajusta relação M2M
lista_pks_assunto = old.cod_assunto.split(',')
- for pk_assunto in lista_pks_assunto:
+
+ # list(filter(..)) usado para retirar strings vazias da lista
+ for pk_assunto in list(filter(None, lista_pks_assunto)):
new.assuntos.add(AssuntoNorma.objects.get(pk=pk_assunto))
 | Error in the adjust_normajuridica_depois_salvar() function
```
File "[...]/sapl/sapl/legacy/migration.py", line 636, in adjust_normajuridica_depois_salvar
new.assuntos.add(AssuntoNorma.objects.get(pk=pk_assunto))
ValueError: invalid literal for int() with base 10: ''
```
This error needs to be investigated. Apparently the script cannot resolve the query `AssuntoNorma.objects.get(pk=pk_assunto)` because pk_assunto is an empty string when it should be a number. It may be caused by some inconsistency in the old many-to-many relation of SAPL2.5, which was stored as a comma-separated string.
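For reference, a minimal sketch of the failure mode and of the `filter(None, ...)` guard applied in the patch above; the sample `cod_assunto` value is hypothetical:
```
# Hypothetical legacy value: a trailing comma leaves an empty entry after split().
cod_assunto = "3,7,"

lista_pks_assunto = cod_assunto.split(',')   # ['3', '7', '']
# Passing '' as pk raises: ValueError: invalid literal for int() with base 10: ''

# filter(None, ...) drops falsy items, i.e. the empty strings,
# so only valid primary keys reach AssuntoNorma.objects.get(pk=...).
for pk_assunto in list(filter(None, lista_pks_assunto)):
    print(int(pk_assunto))  # stand-in for the ORM lookup done in the migration
```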
|
uccser__cs-unplugged-862 | [
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDjango settings for production environment.\n\n- Load secret values from environment variables.\n- Set static URL to Google Cloud Storage Bucket.\n\"\"\"\n\nfrom .base import * # noqa: F403\n\n\n# SECRET CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key\n# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ\nSECRET_KEY = env(\"DJANGO_SECRET_KEY\") # noqa: F405\n\n# SECURITY WARNING: App Engine\"s security features ensure that it is safe to\n# have ALLOWED_HOSTS = [\"*\"] when the app is deployed. If you deploy a Django\n# app not on App Engine, make sure to set an appropriate host here.\n# See https://docs.djangoproject.com/en/1.10/ref/settings/\nALLOWED_HOSTS = [\"*\"]\n\n# URL Configuration\n# ------------------------------------------------------------------------------\nPREPEND_WWW = True\n\n# DATABASE CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": \"csunplugged\",\n \"USER\": env(\"GOOGLE_CLOUD_SQL_DATABASE_USERNAME\"), # noqa: F405\n \"PASSWORD\": env(\"GOOGLE_CLOUD_SQL_DATABASE_PASSWORD\"), # noqa: F405\n \"HOST\": \"/cloudsql/\" + env(\"GOOGLE_CLOUD_SQL_CONNECTION_NAME\"), # noqa: F405\n }\n}\nDATABASES[\"default\"][\"ATOMIC_REQUESTS\"] = True\n\n# Static files\nSTATIC_URL = \"https://storage.googleapis.com/\" + env(\"GOOGLE_CLOUD_STORAGE_BUCKET_NAME\") + \"/static/\" # noqa: F405\n\n# SECURITY CONFIGURATION\n# ------------------------------------------------------------------------------\n# See https://docs.djangoproject.com/en/dev/ref/middleware/#module-django.middleware.security\n# and https://docs.djangoproject.com/en/dev/howto/deployment/checklist/#run-manage-py-check-deploy\n\n# set this to 60 seconds and then to 518400 when you can prove it works\nSECURE_HSTS_SECONDS = 60\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\nSECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(\"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS\", default=True) # noqa: F405\nSECURE_CONTENT_TYPE_NOSNIFF = env.bool(\"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF\", default=True) # noqa: F405\nSECURE_BROWSER_XSS_FILTER = True\nSESSION_COOKIE_SECURE = True\nSESSION_COOKIE_HTTPONLY = True\nSECURE_SSL_REDIRECT = env.bool(\"DJANGO_SECURE_SSL_REDIRECT\", default=True) # noqa: F405\nCSRF_COOKIE_SECURE = True\nCSRF_COOKIE_HTTPONLY = True\nX_FRAME_OPTIONS = \"DENY\"\n",
"path": "csunplugged/config/settings/production.py"
}
] | [
{
"content": "# -*- coding: utf-8 -*-\n\"\"\"\nDjango settings for production environment.\n\n- Load secret values from environment variables.\n- Set static URL to Google Cloud Storage Bucket.\n\"\"\"\n\nfrom .base import * # noqa: F403\n\n\n# SECRET CONFIGURATION\n# ------------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key\n# Raises ImproperlyConfigured exception if DJANGO_SECRET_KEY not in os.environ\nSECRET_KEY = env(\"DJANGO_SECRET_KEY\") # noqa: F405\n\n# SECURITY WARNING: App Engine\"s security features ensure that it is safe to\n# have ALLOWED_HOSTS = [\"*\"] when the app is deployed. If you deploy a Django\n# app not on App Engine, make sure to set an appropriate host here.\n# See https://docs.djangoproject.com/en/1.10/ref/settings/\nALLOWED_HOSTS = [\"*\"]\n\n# URL Configuration\n# ------------------------------------------------------------------------------\nif env(\"DEPLOYMENT\", default=None) == \"prod\": # noqa: F405\n PREPEND_WWW = True\nelse:\n PREPEND_WWW = False\n\n# DATABASE CONFIGURATION\n# ----------------------------------------------------------------------------\n# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases\nDATABASES = {\n \"default\": {\n \"ENGINE\": \"django.db.backends.postgresql\",\n \"NAME\": \"csunplugged\",\n \"USER\": env(\"GOOGLE_CLOUD_SQL_DATABASE_USERNAME\"), # noqa: F405\n \"PASSWORD\": env(\"GOOGLE_CLOUD_SQL_DATABASE_PASSWORD\"), # noqa: F405\n \"HOST\": \"/cloudsql/\" + env(\"GOOGLE_CLOUD_SQL_CONNECTION_NAME\"), # noqa: F405\n }\n}\nDATABASES[\"default\"][\"ATOMIC_REQUESTS\"] = True\n\n# Static files\nSTATIC_URL = \"https://storage.googleapis.com/\" + env(\"GOOGLE_CLOUD_STORAGE_BUCKET_NAME\") + \"/static/\" # noqa: F405\n\n# SECURITY CONFIGURATION\n# ------------------------------------------------------------------------------\n# See https://docs.djangoproject.com/en/dev/ref/middleware/#module-django.middleware.security\n# and https://docs.djangoproject.com/en/dev/howto/deployment/checklist/#run-manage-py-check-deploy\n\n# set this to 60 seconds and then to 518400 when you can prove it works\nSECURE_HSTS_SECONDS = 60\nSECURE_PROXY_SSL_HEADER = (\"HTTP_X_FORWARDED_PROTO\", \"https\")\nSECURE_HSTS_INCLUDE_SUBDOMAINS = env.bool(\"DJANGO_SECURE_HSTS_INCLUDE_SUBDOMAINS\", default=True) # noqa: F405\nSECURE_CONTENT_TYPE_NOSNIFF = env.bool(\"DJANGO_SECURE_CONTENT_TYPE_NOSNIFF\", default=True) # noqa: F405\nSECURE_BROWSER_XSS_FILTER = True\nSESSION_COOKIE_SECURE = True\nSESSION_COOKIE_HTTPONLY = True\nSECURE_SSL_REDIRECT = env.bool(\"DJANGO_SECURE_SSL_REDIRECT\", default=True) # noqa: F405\nCSRF_COOKIE_SECURE = True\nCSRF_COOKIE_HTTPONLY = True\nX_FRAME_OPTIONS = \"DENY\"\n",
"path": "csunplugged/config/settings/production.py"
}
] | diff --git a/csunplugged/config/settings/production.py b/csunplugged/config/settings/production.py
index 3ade6034b..cb1c808a2 100644
--- a/csunplugged/config/settings/production.py
+++ b/csunplugged/config/settings/production.py
@@ -23,7 +23,10 @@
# URL Configuration
# ------------------------------------------------------------------------------
-PREPEND_WWW = True
+if env("DEPLOYMENT", default=None) == "prod": # noqa: F405
+ PREPEND_WWW = True
+else:
+ PREPEND_WWW = False
# DATABASE CONFIGURATION
# ----------------------------------------------------------------------------
| Only prepend www for production website
It should not be used for the development website.
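A minimal, self-contained sketch of the environment-driven toggle introduced by the patch above; it reads the `DEPLOYMENT` flag through `os.environ` rather than the project's django-environ `env()` helper:
```
import os

# PREPEND_WWW is a standard Django setting; only the production
# deployment should redirect bare domains to the www subdomain.
# os.environ.get() returns None when the flag is unset, so any
# non-production environment falls back to False.
PREPEND_WWW = os.environ.get("DEPLOYMENT") == "prod"
```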
|
kivy__kivy-2526 | [
{
"content": "'''Scroll View\n===========\n\n.. versionadded:: 1.0.4\n\nThe :class:`ScrollView` widget provides a scrollable/pannable viewport that is\nclipped at the scrollview's bounding box.\n\n\nScrolling Behavior\n------------------\n\nThe ScrollView accepts only one child and applies a viewport/window to\nit according to the :attr:`ScrollView.scroll_x` and\n:attr:`ScrollView.scroll_y` properties. Touches are analyzed to\ndetermine if the user wants to scroll or control the child in some\nother manner - you cannot do both at the same time. To determine if\ninteraction is a scrolling gesture, these properties are used:\n\n - :attr:`ScrollView.scroll_distance`: the minimum distance to travel,\n defaults to 20 pixels.\n - :attr:`ScrollView.scroll_timeout`: the maximum time period, defaults\n to 250 milliseconds.\n\nIf a touch travels :attr:`~ScrollView.scroll_distance` pixels within the\n:attr:`~ScrollView.scroll_timeout` period, it is recognized as a scrolling\ngesture and translation (scroll/pan) will begin. If the timeout occurs, the\ntouch down event is dispatched to the child instead (no translation).\n\nThe default value for those settings can be changed in the configuration file::\n\n [widgets]\n scroll_timeout = 250\n scroll_distance = 20\n\n.. versionadded:: 1.1.1\n\n ScrollView now animates scrolling in Y when a mousewheel is used.\n\n\nLimiting to the X or Y Axis\n---------------------------\n\nBy default, the ScrollView allows scrolling in both the X and Y axes. You can\nexplicitly disable scrolling on an axis by setting\n:attr:`ScrollView.do_scroll_x` or :attr:`ScrollView.do_scroll_y` to False.\n\n\nManaging the Content Size and Position\n--------------------------------------\n\nScrollView manages the position of its children similarly to a\nRelativeLayout (see :mod:`~kivy.uix.relativelayout`) but not the size. You must\ncarefully specify the `size_hint` of your content to get the desired\nscroll/pan effect.\n\nBy default, size_hint is (1, 1), so the content size will fit your ScrollView\nexactly (you will have nothing to scroll). You must deactivate at least one of\nthe size_hint instructions (x or y) of the child to enable scrolling.\n\nTo scroll a :class:`GridLayout` on Y-axis/vertically, set the child's width\nidentical to that of the ScrollView (size_hint_x=1, default), and set the\nsize_hint_y property to None::\n\n layout = GridLayout(cols=1, spacing=10, size_hint_y=None)\n # Make sure the height is such that there is something to scroll.\n layout.bind(minimum_height=layout.setter('height'))\n for i in range(30):\n btn = Button(text=str(i), size_hint_y=None, height=40)\n layout.add_widget(btn)\n root = ScrollView(size_hint=(None, None), size=(400, 400))\n root.add_widget(layout)\n\n\nOverscroll Effects\n------------------\n\n.. versionadded:: 1.7.0\n\nWhen scrolling would exceed the bounds of the :class:`ScrollView`, it\nuses a :class:`~kivy.effects.scroll.ScrollEffect` to handle the\noverscroll. These effects can perform actions like bouncing back,\nchanging opacity, or simply preventing scrolling beyond the normal\nboundaries. Note that complex effects may perform many computations,\nwhich can be slow on weaker hardware.\n\nYou can change what effect is being used by setting\n:attr:`ScrollView.effect_cls` to any effect class. Current options\ninclude:\n\n - :class:`~kivy.effects.scroll.ScrollEffect`: Does not allow\n scrolling beyond the :class:`ScrollView` boundaries.\n - :class:`~kivy.effects.dampedscroll.DampedScrollEffect`: The\n current default. 
Allows the user to scroll beyond the normal\n boundaries, but has the content spring back once the\n touch/click is released.\n - :class:`~kivy.effects.opacityscroll.OpacityScrollEffect`: Similar\n to the :class:`~kivy.effect.dampedscroll.DampedScrollEffect`, but\n also reduces opacity during overscroll.\n\nYou can also create your own scroll effect by subclassing one of these,\nthen pass it as the :attr:`~ScrollView.effect_cls` in the same way.\n\nAlternatively, you can set :attr:`ScrollView.effect_x` and/or\n:attr:`ScrollView.effect_y` to an *instance* of the effect you want to\nuse. This will override the default effect set in\n:attr:`ScrollView.effect_cls`.\n\nAll the effects are located in the :mod:`kivy.effects`.\n\n'''\n\n__all__ = ('ScrollView', )\n\nfrom functools import partial\nfrom kivy.animation import Animation\nfrom kivy.compat import string_types\nfrom kivy.config import Config\nfrom kivy.clock import Clock\nfrom kivy.factory import Factory\nfrom kivy.uix.stencilview import StencilView\nfrom kivy.metrics import sp\nfrom kivy.effects.dampedscroll import DampedScrollEffect\nfrom kivy.properties import NumericProperty, BooleanProperty, AliasProperty, \\\n ObjectProperty, ListProperty, ReferenceListProperty, OptionProperty\n\n\n# When we are generating documentation, Config doesn't exist\n_scroll_timeout = _scroll_distance = 0\nif Config:\n _scroll_timeout = Config.getint('widgets', 'scroll_timeout')\n _scroll_distance = sp(Config.getint('widgets', 'scroll_distance'))\n\n\nclass ScrollView(StencilView):\n '''ScrollView class. See module documentation for more information.\n\n .. versionchanged:: 1.7.0\n `auto_scroll`, `scroll_friction`, `scroll_moves`, `scroll_stoptime' has\n been deprecated, use :attr:`effect_cls` instead.\n '''\n\n scroll_distance = NumericProperty(_scroll_distance)\n '''Distance to move before scrolling the :class:`ScrollView`, in pixels. As\n soon as the distance has been traveled, the :class:`ScrollView` will start\n to scroll, and no touch event will go to children.\n It is advisable that you base this value on the dpi of your target device's\n screen.\n\n :attr:`scroll_distance` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 20 (pixels), according to the default value in user\n configuration.\n '''\n\n scroll_wheel_distance = NumericProperty(20)\n '''Distance to move when scrolling with a mouse wheel.\n It is advisable that you base this value on the dpi of your target device's\n screen.\n\n .. versionadded:: 1.8.0\n\n :attr:`scroll_wheel_distance` is a\n :class:`~kivy.properties.NumericProperty` , defaults to 20 pixels.\n '''\n\n scroll_timeout = NumericProperty(_scroll_timeout)\n '''Timeout allowed to trigger the :attr:`scroll_distance`, in milliseconds.\n If the user has not moved :attr:`scroll_distance` within the timeout,\n the scrolling will be disabled, and the touch event will go to the\n children.\n\n :attr:`scroll_timeout` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 55 (milliseconds) according to the default value in user\n configuration.\n\n .. versionchanged:: 1.5.0\n Default value changed from 250 to 55.\n '''\n\n scroll_x = NumericProperty(0.)\n '''X scrolling value, between 0 and 1. If 0, the content's left side will\n touch the left side of the ScrollView. 
If 1, the content's right side will\n touch the right side.\n\n This property is controled by :class:`ScrollView` only if\n :attr:`do_scroll_x` is True.\n\n :attr:`scroll_x` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 0.\n '''\n\n scroll_y = NumericProperty(1.)\n '''Y scrolling value, between 0 and 1. If 0, the content's bottom side will\n touch the bottom side of the ScrollView. If 1, the content's top side will\n touch the top side.\n\n This property is controled by :class:`ScrollView` only if\n :attr:`do_scroll_y` is True.\n\n :attr:`scroll_y` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 1.\n '''\n\n do_scroll_x = BooleanProperty(True)\n '''Allow scroll on X axis.\n\n :attr:`do_scroll_x` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to True.\n '''\n\n do_scroll_y = BooleanProperty(True)\n '''Allow scroll on Y axis.\n\n :attr:`do_scroll_y` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to True.\n '''\n\n def _get_do_scroll(self):\n return (self.do_scroll_x, self.do_scroll_y)\n\n def _set_do_scroll(self, value):\n if type(value) in (list, tuple):\n self.do_scroll_x, self.do_scroll_y = value\n else:\n self.do_scroll_x = self.do_scroll_y = bool(value)\n do_scroll = AliasProperty(_get_do_scroll, _set_do_scroll,\n bind=('do_scroll_x', 'do_scroll_y'))\n '''Allow scroll on X or Y axis.\n\n :attr:`do_scroll` is a :class:`~kivy.properties.AliasProperty` of\n (:attr:`do_scroll_x` + :attr:`do_scroll_y`)\n '''\n\n def _get_vbar(self):\n # must return (y, height) in %\n # calculate the viewport size / scrollview size %\n if self._viewport is None:\n return 0, 1.\n vh = self._viewport.height\n h = self.height\n if vh < h or vh == 0:\n return 0, 1.\n ph = max(0.01, h / float(vh))\n sy = min(1.0, max(0.0, self.scroll_y))\n py = (1. - ph) * sy\n return (py, ph)\n\n vbar = AliasProperty(_get_vbar, None, bind=(\n 'scroll_y', '_viewport', 'viewport_size'))\n '''Return a tuple of (position, size) of the vertical scrolling bar.\n\n .. versionadded:: 1.2.0\n\n The position and size are normalized between 0-1, and represent a\n percentage of the current scrollview height. This property is used\n internally for drawing the little vertical bar when you're scrolling.\n\n :attr:`vbar` is a :class:`~kivy.properties.AliasProperty`, readonly.\n '''\n\n def _get_hbar(self):\n # must return (x, width) in %\n # calculate the viewport size / scrollview size %\n if self._viewport is None:\n return 0, 1.\n vw = self._viewport.width\n w = self.width\n if vw < w or vw == 0:\n return 0, 1.\n pw = max(0.01, w / float(vw))\n sx = min(1.0, max(0.0, self.scroll_x))\n px = (1. - pw) * sx\n return (px, pw)\n\n hbar = AliasProperty(_get_hbar, None, bind=(\n 'scroll_x', '_viewport', 'viewport_size'))\n '''Return a tuple of (position, size) of the horizontal scrolling bar.\n\n .. versionadded:: 1.2.0\n\n The position and size are normalized between 0-1, and represent a\n percentage of the current scrollview height. This property is used\n internally for drawing the little horizontal bar when you're scrolling.\n\n :attr:`vbar` is a :class:`~kivy.properties.AliasProperty`, readonly.\n '''\n\n bar_color = ListProperty([.7, .7, .7, .9])\n '''Color of horizontal / vertical scroll bar, in RGBA format.\n\n .. 
versionadded:: 1.2.0\n\n :attr:`bar_color` is a :class:`~kivy.properties.ListProperty` and defaults\n to [.7, .7, .7, .9].\n '''\n\n bar_inactive_color = ListProperty([.7, .7, .7, .2])\n '''Color of horizontal / vertical scroll bar (in RGBA format), when no\n scroll is happening.\n\n .. versionadded:: 1.9.0\n\n :attr:`bar_inactive_color` is a\n :class:`~kivy.properties.ListProperty` and defaults to [.7, .7, .7, .2].\n '''\n\n bar_width = NumericProperty('2dp')\n '''Width of the horizontal / vertical scroll bar. The width is interpreted\n as a height for the horizontal bar.\n\n .. versionadded:: 1.2.0\n\n :attr:`bar_width` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 2.\n '''\n\n bar_pos_x = OptionProperty('bottom', options=('top', 'bottom'))\n '''Which side of the ScrollView the horizontal scroll bar should go\n on. Possible values are 'top' and 'bottom'.\n\n .. versionadded:: 1.8.0\n\n :attr:`bar_pos_x` is an :class:`~kivy.properties.OptionProperty`,\n default to 'bottom'\n\n '''\n\n bar_pos_y = OptionProperty('right', options=('left', 'right'))\n '''Which side of the ScrollView the vertical scroll bar should go\n on. Possible values are 'left' and 'right'.\n\n .. versionadded:: 1.8.0\n\n :attr:`bar_pos_y` is an :class:`~kivy.properties.OptionProperty`,\n default to 'right'\n\n '''\n\n bar_pos = ReferenceListProperty(bar_pos_x, bar_pos_y)\n '''Which side of the scroll view to place each of the bars on.\n\n :attr:`bar_pos` is a :class:`~kivy.properties.ReferenceListProperty` of\n (:attr:`bar_pos_x`, :attr:`bar_pos_y`)\n '''\n\n bar_margin = NumericProperty(0)\n '''Margin between the bottom / right side of the scrollview when drawing\n the horizontal / vertical scroll bar.\n\n .. versionadded:: 1.2.0\n\n :attr:`bar_margin` is a :class:`~kivy.properties.NumericProperty`, default\n to 0\n '''\n\n effect_cls = ObjectProperty(DampedScrollEffect, allownone=True)\n '''Class effect to instanciate for X and Y axis.\n\n .. versionadded:: 1.7.0\n\n :attr:`effect_cls` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to :class:`DampedScrollEffect`.\n\n .. versionchanged:: 1.8.0\n If you set a string, the :class:`~kivy.factory.Factory` will be used to\n resolve the class.\n\n '''\n\n effect_x = ObjectProperty(None, allownone=True)\n '''Effect to apply for the X axis. If None is set, an instance of\n :attr:`effect_cls` will be created.\n\n .. versionadded:: 1.7.0\n\n :attr:`effect_x` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to None.\n '''\n\n effect_y = ObjectProperty(None, allownone=True)\n '''Effect to apply for the Y axis. If None is set, an instance of\n :attr:`effect_cls` will be created.\n\n .. versionadded:: 1.7.0\n\n :attr:`effect_y` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to None, read-only.\n '''\n\n viewport_size = ListProperty([0, 0])\n '''(internal) Size of the internal viewport. This is the size of your only\n child in the scrollview.\n '''\n\n scroll_type = OptionProperty(['content'], options=(['content'], ['bars'],\n ['bars', 'content'], ['content', 'bars']))\n '''Sets the type of scrolling to use for the content of the scrollview.\n Available options are: ['content'], ['bars'], ['bars', 'content'].\n\n .. 
versionadded:: 1.8.0\n\n :attr:`scroll_type` is a :class:`~kivy.properties.OptionProperty`, defaults\n to ['content'].\n '''\n\n # private, for internal use only\n\n _viewport = ObjectProperty(None, allownone=True)\n _bar_color = ListProperty([0, 0, 0, 0])\n\n def _set_viewport_size(self, instance, value):\n self.viewport_size = value\n\n def on__viewport(self, instance, value):\n if value:\n value.bind(size=self._set_viewport_size)\n self.viewport_size = value.size\n\n def __init__(self, **kwargs):\n self._touch = None\n self._trigger_update_from_scroll = Clock.create_trigger(\n self.update_from_scroll, -1)\n # create a specific canvas for the viewport\n from kivy.graphics import PushMatrix, Translate, PopMatrix, Canvas\n self.canvas_viewport = Canvas()\n self.canvas = Canvas()\n with self.canvas_viewport.before:\n PushMatrix()\n self.g_translate = Translate(0, 0)\n with self.canvas_viewport.after:\n PopMatrix()\n\n super(ScrollView, self).__init__(**kwargs)\n\n self.register_event_type('on_scroll_start')\n self.register_event_type('on_scroll_move')\n self.register_event_type('on_scroll_stop')\n\n # now add the viewport canvas to our canvas\n self.canvas.add(self.canvas_viewport)\n\n effect_cls = self.effect_cls\n if isinstance(effect_cls, string_types):\n effect_cls = Factory.get(effect_cls)\n if self.effect_x is None and effect_cls is not None:\n self.effect_x = effect_cls(target_widget=self._viewport)\n if self.effect_y is None and effect_cls is not None:\n self.effect_y = effect_cls(target_widget=self._viewport)\n self.bind(\n width=self._update_effect_x_bounds,\n height=self._update_effect_y_bounds,\n viewport_size=self._update_effect_bounds,\n _viewport=self._update_effect_widget,\n scroll_x=self._trigger_update_from_scroll,\n scroll_y=self._trigger_update_from_scroll,\n pos=self._trigger_update_from_scroll,\n size=self._trigger_update_from_scroll)\n\n self._update_effect_widget()\n self._update_effect_x_bounds()\n self._update_effect_y_bounds()\n\n def on_effect_x(self, instance, value):\n if value:\n value.bind(scroll=self._update_effect_x)\n value.target_widget = self._viewport\n\n def on_effect_y(self, instance, value):\n if value:\n value.bind(scroll=self._update_effect_y)\n value.target_widget = self._viewport\n\n def on_effect_cls(self, instance, cls):\n if isinstance(cls, string_types):\n cls = Factory.get(cls)\n self.effect_x = cls(target_widget=self._viewport)\n self.effect_x.bind(scroll=self._update_effect_x)\n self.effect_y = cls(target_widget=self._viewport)\n self.effect_y.bind(scroll=self._update_effect_y)\n\n def _update_effect_widget(self, *args):\n if self.effect_x:\n self.effect_x.target_widget = self._viewport\n if self.effect_y:\n self.effect_y.target_widget = self._viewport\n\n def _update_effect_x_bounds(self, *args):\n if not self._viewport or not self.effect_x:\n return\n self.effect_x.min = -(self.viewport_size[0] - self.width)\n self.effect_x.max = 0\n self.effect_x.value = self.effect_x.min * self.scroll_x\n\n def _update_effect_y_bounds(self, *args):\n if not self._viewport or not self.effect_y:\n return\n self.effect_y.min = -(self.viewport_size[1] - self.height)\n self.effect_y.max = 0\n self.effect_y.value = self.effect_y.min * self.scroll_y\n\n def _update_effect_bounds(self, *args):\n if not self._viewport:\n return\n if self.effect_x:\n self._update_effect_x_bounds()\n if self.effect_y:\n self._update_effect_y_bounds()\n\n def _update_effect_x(self, *args):\n vp = self._viewport\n if not vp or not self.effect_x:\n return\n sw = vp.width - 
self.width\n if sw < 1:\n return\n sx = self.effect_x.scroll / float(sw)\n self.scroll_x = -sx\n self._trigger_update_from_scroll()\n\n def _update_effect_y(self, *args):\n vp = self._viewport\n if not vp or not self.effect_y:\n return\n sh = vp.height - self.height\n if sh < 1:\n return\n sy = self.effect_y.scroll / float(sh)\n self.scroll_y = -sy\n self._trigger_update_from_scroll()\n\n def to_local(self, x, y, **k):\n tx, ty = self.g_translate.xy\n return x - tx, y - ty\n\n def to_parent(self, x, y, **k):\n tx, ty = self.g_translate.xy\n return x + tx, y + ty\n\n def simulate_touch_down(self, touch):\n # at this point the touch is in parent coords\n touch.push()\n touch.apply_transform_2d(self.to_local)\n ret = super(ScrollView, self).on_touch_down(touch)\n touch.pop()\n return ret\n\n def on_touch_down(self, touch):\n if self.dispatch('on_scroll_start', touch):\n self._touch = touch\n touch.grab(self)\n return True\n\n def on_scroll_start(self, touch, check_children=True):\n if check_children:\n touch.push()\n touch.apply_transform_2d(self.to_local)\n if self.dispatch_children('on_scroll_start', touch):\n return True\n touch.pop()\n\n if not self.collide_point(*touch.pos):\n touch.ud[self._get_uid('svavoid')] = True\n return\n if self.disabled:\n return True\n if self._touch or (not (self.do_scroll_x or self.do_scroll_y)):\n return self.simulate_touch_down(touch)\n\n # handle mouse scrolling, only if the viewport size is bigger than the\n # scrollview size, and if the user allowed to do it\n vp = self._viewport\n if not vp:\n return True\n scroll_type = self.scroll_type\n ud = touch.ud\n scroll_bar = 'bars' in scroll_type\n\n # check if touch is in bar_x(horizontal) or bay_y(bertical)\n ud['in_bar_x'] = ud['in_bar_y'] = False\n width_scrollable = vp.width > self.width\n height_scrollable = vp.height > self.height\n bar_pos_x = self.bar_pos_x[0]\n bar_pos_y = self.bar_pos_y[0]\n\n d = {'b': True if touch.y < self.y + self.bar_width else False,\n 't': True if touch.y > self.top - self.bar_width else False,\n 'l': True if touch.x < self.x + self.bar_width else False,\n 'r': True if touch.x > self.right - self.bar_width else False}\n if scroll_bar:\n if (width_scrollable and d[bar_pos_x]):\n ud['in_bar_x'] = True\n if (height_scrollable and d[bar_pos_y]):\n ud['in_bar_y'] = True\n\n if vp and 'button' in touch.profile and \\\n touch.button.startswith('scroll'):\n btn = touch.button\n m = sp(self.scroll_wheel_distance)\n e = None\n\n if ((btn == 'scrolldown' and self.scroll_y >= 1) or\n (btn == 'scrollup' and self.scroll_y <= 0) or\n (btn == 'scrollleft' and self.scroll_x <= 0) or\n (btn == 'scrollright' and self.scroll_x >= 1)):\n return False\n\n if (self.effect_x and self.do_scroll_y and height_scrollable\n and btn in ('scrolldown', 'scrollup')):\n e = self.effect_x if ud['in_bar_x'] else self.effect_y\n\n elif (self.effect_y and self.do_scroll_x and width_scrollable\n and btn in ('scrollleft', 'scrollright')):\n e = self.effect_y if ud['in_bar_y'] else self.effect_x\n\n if e:\n if btn in ('scrolldown', 'scrollleft'):\n e.value = max(e.value - m, e.min)\n e.velocity = 0\n elif btn in ('scrollup', 'scrollright'):\n e.value = min(e.value + m, e.max)\n e.velocity = 0\n touch.ud[self._get_uid('svavoid')] = True\n e.trigger_velocity_update()\n return True\n\n # no mouse scrolling, so the user is going to drag the scrollview with\n # this touch.\n self._touch = touch\n uid = self._get_uid()\n\n ud[uid] = {\n 'mode': 'unknown',\n 'dx': 0,\n 'dy': 0,\n 'user_stopped': False,\n 'frames': 
Clock.frames,\n 'time': touch.time_start}\n\n if self.do_scroll_x and self.effect_x and not ud['in_bar_x']:\n self.effect_x.start(touch.x)\n self._scroll_x_mouse = self.scroll_x\n if self.do_scroll_y and self.effect_y and not ud['in_bar_y']:\n self.effect_y.start(touch.y)\n self._scroll_y_mouse = self.scroll_y\n\n if (ud.get('in_bar_x', False) or ud.get('in_bar_y', False)):\n return\n if scroll_type == ['bars']:\n # touch is in parent, but _change_touch_mode expects window coords\n touch.push()\n touch.apply_transform_2d(self.to_local)\n touch.apply_transform_2d(self.to_window)\n self._change_touch_mode()\n touch.pop()\n return False\n else:\n Clock.schedule_once(self._change_touch_mode,\n self.scroll_timeout / 1000.)\n return True\n\n def on_touch_move(self, touch):\n if self._touch is not touch:\n # touch is in parent\n touch.push()\n touch.apply_transform_2d(self.to_local)\n super(ScrollView, self).on_touch_move(touch)\n touch.pop()\n return self._get_uid() in touch.ud\n if touch.grab_current is not self:\n return True\n\n touch.ud['sv.handled'] = {'x': False, 'y': False}\n if self.dispatch('on_scroll_move', touch):\n return True\n\n def on_scroll_move(self, touch):\n if self._get_uid('svavoid') in touch.ud:\n return False\n\n touch.push()\n touch.apply_transform_2d(self.to_local)\n if self.dispatch_children('on_scroll_move', touch):\n return True\n touch.pop()\n\n rv = True\n\n uid = self._get_uid()\n if not uid in touch.ud:\n self._touch = False\n return self.on_scroll_start(touch, False)\n ud = touch.ud[uid]\n mode = ud['mode']\n\n # check if the minimum distance has been travelled\n if mode == 'unknown' or mode == 'scroll':\n if not touch.ud['sv.handled']['x'] and self.do_scroll_x \\\n and self.effect_x:\n width = self.width\n if touch.ud.get('in_bar_x', False):\n dx = touch.dx / float(width - width * self.hbar[1])\n self.scroll_x = min(max(self.scroll_x + dx, 0.), 1.)\n self._trigger_update_from_scroll()\n else:\n if self.scroll_type != ['bars']:\n self.effect_x.update(touch.x)\n if self.scroll_x < 0 or self.scroll_x > 1:\n rv = False\n else:\n touch.ud['sv.handled']['x'] = True\n if not touch.ud['sv.handled']['y'] and self.do_scroll_y \\\n and self.effect_y:\n height = self.height\n if touch.ud.get('in_bar_y', False):\n dy = touch.dy / float(height - height * self.vbar[1])\n self.scroll_y = min(max(self.scroll_y + dy, 0.), 1.)\n self._trigger_update_from_scroll()\n else:\n if self.scroll_type != ['bars']:\n self.effect_y.update(touch.y)\n if self.scroll_y < 0 or self.scroll_y > 1:\n rv = False\n else:\n touch.ud['sv.handled']['y'] = True\n\n if mode == 'unknown':\n ud['dx'] += abs(touch.dx)\n ud['dy'] += abs(touch.dy)\n if ud['dx'] > self.scroll_distance:\n if not self.do_scroll_x:\n # touch is in parent, but _change expects window coords\n touch.push()\n touch.apply_transform_2d(self.to_local)\n touch.apply_transform_2d(self.to_window)\n self._change_touch_mode()\n touch.pop()\n return\n mode = 'scroll'\n\n if ud['dy'] > self.scroll_distance:\n if not self.do_scroll_y:\n # touch is in parent, but _change expects window coords\n touch.push()\n touch.apply_transform_2d(self.to_local)\n touch.apply_transform_2d(self.to_window)\n self._change_touch_mode()\n touch.pop()\n return\n mode = 'scroll'\n ud['mode'] = mode\n\n if mode == 'scroll':\n ud['dt'] = touch.time_update - ud['time']\n ud['time'] = touch.time_update\n ud['user_stopped'] = True\n\n return rv\n\n def on_touch_up(self, touch):\n if self._touch is not touch and self.uid not in touch.ud:\n # touch is in parents\n 
touch.push()\n touch.apply_transform_2d(self.to_local)\n if super(ScrollView, self).on_touch_up(touch):\n return True\n touch.pop()\n return False\n\n if self.dispatch('on_scroll_stop', touch):\n touch.ungrab(self)\n return True\n\n def on_scroll_stop(self, touch, check_children=True):\n self._touch = None\n\n if check_children:\n touch.push()\n touch.apply_transform_2d(self.to_local)\n if self.dispatch_children('on_scroll_stop', touch):\n return True\n touch.pop()\n\n if self._get_uid('svavoid') in touch.ud:\n return\n if self._get_uid() not in touch.ud:\n return False\n\n self._touch = None\n uid = self._get_uid()\n ud = touch.ud[uid]\n if self.do_scroll_x and self.effect_x:\n if not touch.ud.get('in_bar_x', False) and\\\n self.scroll_type != ['bars']:\n self.effect_x.stop(touch.x)\n if self.do_scroll_y and self.effect_y and\\\n self.scroll_type != ['bars']:\n if not touch.ud.get('in_bar_y', False):\n self.effect_y.stop(touch.y)\n if ud['mode'] == 'unknown':\n # we must do the click at least..\n # only send the click if it was not a click to stop\n # autoscrolling\n if not ud['user_stopped']:\n self.simulate_touch_down(touch)\n Clock.schedule_once(partial(self._do_touch_up, touch), .2)\n Clock.unschedule(self._update_effect_bounds)\n Clock.schedule_once(self._update_effect_bounds)\n\n # if we do mouse scrolling, always accept it\n if 'button' in touch.profile and touch.button.startswith('scroll'):\n return True\n\n return self._get_uid() in touch.ud\n\n def convert_distance_to_scroll(self, dx, dy):\n '''Convert a distance in pixels to a scroll distance, depending on the\n content size and the scrollview size.\n\n The result will be a tuple of scroll distance that can be added to\n :data:`scroll_x` and :data:`scroll_y`\n '''\n if not self._viewport:\n return 0, 0\n vp = self._viewport\n if vp.width > self.width:\n sw = vp.width - self.width\n sx = dx / float(sw)\n else:\n sx = 0\n if vp.height > self.height:\n sh = vp.height - self.height\n sy = dy / float(sh)\n else:\n sy = 1\n return sx, sy\n\n def update_from_scroll(self, *largs):\n '''Force the reposition of the content, according to current value of\n :attr:`scroll_x` and :attr:`scroll_y`.\n\n This method is automatically called when one of the :attr:`scroll_x`,\n :attr:`scroll_y`, :attr:`pos` or :attr:`size` properties change, or\n if the size of the content changes.\n '''\n if not self._viewport:\n return\n vp = self._viewport\n\n # update from size_hint\n if vp.size_hint_x is not None:\n vp.width = vp.size_hint_x * self.width\n if vp.size_hint_y is not None:\n vp.height = vp.size_hint_y * self.height\n\n if vp.width > self.width:\n sw = vp.width - self.width\n x = self.x - self.scroll_x * sw\n else:\n x = self.x\n if vp.height > self.height:\n sh = vp.height - self.height\n y = self.y - self.scroll_y * sh\n else:\n y = self.top - vp.height\n\n # from 1.8.0, we now use a matrix by default, instead of moving the\n # widget position behind. 
We set it here, but it will be a no-op most of\n # the time.\n vp.pos = 0, 0\n self.g_translate.xy = x, y\n\n # New in 1.2.0, show bar when scrolling happens and (changed in 1.9.0)\n # fade to bar_inactive_color when no scroll is happening.\n Clock.unschedule(self._bind_inactive_bar_color)\n self.unbind(bar_inactive_color=self._change_bar_color)\n Animation.stop_all(self, '_bar_color')\n self.bind(bar_color=self._change_bar_color)\n self._bar_color = self.bar_color\n Clock.schedule_once(self._bind_inactive_bar_color, .5)\n\n def _bind_inactive_bar_color(self, *l):\n self.unbind(bar_color=self._change_bar_color)\n self.bind(bar_inactive_color=self._change_bar_color)\n Animation(\n _bar_color=self.bar_inactive_color, d=.5, t='out_quart').start(self)\n\n def _change_bar_color(self, inst, value):\n self._bar_color = value\n\n #\n # Private\n #\n def add_widget(self, widget, index=0):\n if self._viewport:\n raise Exception('ScrollView accept only one widget')\n canvas = self.canvas\n self.canvas = self.canvas_viewport\n super(ScrollView, self).add_widget(widget, index)\n self.canvas = canvas\n self._viewport = widget\n widget.bind(size=self._trigger_update_from_scroll)\n self._trigger_update_from_scroll()\n\n def remove_widget(self, widget):\n canvas = self.canvas\n self.canvas = self.canvas_viewport\n super(ScrollView, self).remove_widget(widget)\n self.canvas = canvas\n if widget is self._viewport:\n self._viewport = None\n\n def _get_uid(self, prefix='sv'):\n return '{0}.{1}'.format(prefix, self.uid)\n\n def _change_touch_mode(self, *largs):\n if not self._touch:\n return\n uid = self._get_uid()\n touch = self._touch\n ud = touch.ud[uid]\n if ud['mode'] != 'unknown' or ud['user_stopped']:\n return\n diff_frames = Clock.frames - ud['frames']\n\n # in order to be able to scroll on very slow devices, let at least 3\n # frames displayed to accumulate some velocity. And then, change the\n # touch mode. Otherwise, we might never be able to compute velocity, and\n # no way to scroll it. See #1464 and #1499\n if diff_frames < 3:\n Clock.schedule_once(self._change_touch_mode, 0)\n return\n\n if self.do_scroll_x and self.effect_x:\n self.effect_x.cancel()\n if self.do_scroll_y and self.effect_y:\n self.effect_y.cancel()\n # XXX the next line was in the condition. 
But this stop\n # the possibily to \"drag\" an object out of the scrollview in the\n # non-used direction: if you have an horizontal scrollview, a\n # vertical gesture will not \"stop\" the scroll view to look for an\n # horizontal gesture, until the timeout is done.\n # and touch.dx + touch.dy == 0:\n touch.ungrab(self)\n self._touch = None\n # touch is in window coords\n touch.push()\n touch.apply_transform_2d(self.to_widget)\n touch.apply_transform_2d(self.to_parent)\n self.simulate_touch_down(touch)\n touch.pop()\n return\n\n def _do_touch_up(self, touch, *largs):\n # touch is in window coords\n touch.push()\n touch.apply_transform_2d(self.to_widget)\n super(ScrollView, self).on_touch_up(touch)\n touch.pop()\n # don't forget about grab event!\n for x in touch.grab_list[:]:\n touch.grab_list.remove(x)\n x = x()\n if not x:\n continue\n touch.grab_current = x\n # touch is in window coords\n touch.push()\n touch.apply_transform_2d(self.to_widget)\n super(ScrollView, self).on_touch_up(touch)\n touch.pop()\n touch.grab_current = None\n\n\nif __name__ == '__main__':\n from kivy.app import App\n\n from kivy.uix.gridlayout import GridLayout\n from kivy.uix.button import Button\n\n class ScrollViewApp(App):\n\n def build(self):\n layout1 = GridLayout(cols=4, spacing=10, size_hint=(None, None))\n layout1.bind(minimum_height=layout1.setter('height'),\n minimum_width=layout1.setter('width'))\n for i in range(40):\n btn = Button(text=str(i), size_hint=(None, None),\n size=(200, 100))\n layout1.add_widget(btn)\n scrollview1 = ScrollView(bar_width='2dp')\n scrollview1.add_widget(layout1)\n\n layout2 = GridLayout(cols=4, spacing=10, size_hint=(None, None))\n layout2.bind(minimum_height=layout2.setter('height'),\n minimum_width=layout2.setter('width'))\n for i in range(40):\n btn = Button(text=str(i), size_hint=(None, None),\n size=(200, 100))\n layout2.add_widget(btn)\n scrollview2 = ScrollView(scroll_type=['bars'],\n bar_width='9dp',\n scroll_wheel_distance=100)\n scrollview2.add_widget(layout2)\n\n root = GridLayout(cols=2)\n root.add_widget(scrollview1)\n root.add_widget(scrollview2)\n return root\n\n ScrollViewApp().run()\n",
"path": "kivy/uix/scrollview.py"
}
] | [
{
"content": "'''Scroll View\n===========\n\n.. versionadded:: 1.0.4\n\nThe :class:`ScrollView` widget provides a scrollable/pannable viewport that is\nclipped at the scrollview's bounding box.\n\n\nScrolling Behavior\n------------------\n\nThe ScrollView accepts only one child and applies a viewport/window to\nit according to the :attr:`ScrollView.scroll_x` and\n:attr:`ScrollView.scroll_y` properties. Touches are analyzed to\ndetermine if the user wants to scroll or control the child in some\nother manner - you cannot do both at the same time. To determine if\ninteraction is a scrolling gesture, these properties are used:\n\n - :attr:`ScrollView.scroll_distance`: the minimum distance to travel,\n defaults to 20 pixels.\n - :attr:`ScrollView.scroll_timeout`: the maximum time period, defaults\n to 250 milliseconds.\n\nIf a touch travels :attr:`~ScrollView.scroll_distance` pixels within the\n:attr:`~ScrollView.scroll_timeout` period, it is recognized as a scrolling\ngesture and translation (scroll/pan) will begin. If the timeout occurs, the\ntouch down event is dispatched to the child instead (no translation).\n\nThe default value for those settings can be changed in the configuration file::\n\n [widgets]\n scroll_timeout = 250\n scroll_distance = 20\n\n.. versionadded:: 1.1.1\n\n ScrollView now animates scrolling in Y when a mousewheel is used.\n\n\nLimiting to the X or Y Axis\n---------------------------\n\nBy default, the ScrollView allows scrolling in both the X and Y axes. You can\nexplicitly disable scrolling on an axis by setting\n:attr:`ScrollView.do_scroll_x` or :attr:`ScrollView.do_scroll_y` to False.\n\n\nManaging the Content Size and Position\n--------------------------------------\n\nScrollView manages the position of its children similarly to a\nRelativeLayout (see :mod:`~kivy.uix.relativelayout`) but not the size. You must\ncarefully specify the `size_hint` of your content to get the desired\nscroll/pan effect.\n\nBy default, size_hint is (1, 1), so the content size will fit your ScrollView\nexactly (you will have nothing to scroll). You must deactivate at least one of\nthe size_hint instructions (x or y) of the child to enable scrolling.\n\nTo scroll a :class:`GridLayout` on Y-axis/vertically, set the child's width\nidentical to that of the ScrollView (size_hint_x=1, default), and set the\nsize_hint_y property to None::\n\n layout = GridLayout(cols=1, spacing=10, size_hint_y=None)\n # Make sure the height is such that there is something to scroll.\n layout.bind(minimum_height=layout.setter('height'))\n for i in range(30):\n btn = Button(text=str(i), size_hint_y=None, height=40)\n layout.add_widget(btn)\n root = ScrollView(size_hint=(None, None), size=(400, 400))\n root.add_widget(layout)\n\n\nOverscroll Effects\n------------------\n\n.. versionadded:: 1.7.0\n\nWhen scrolling would exceed the bounds of the :class:`ScrollView`, it\nuses a :class:`~kivy.effects.scroll.ScrollEffect` to handle the\noverscroll. These effects can perform actions like bouncing back,\nchanging opacity, or simply preventing scrolling beyond the normal\nboundaries. Note that complex effects may perform many computations,\nwhich can be slow on weaker hardware.\n\nYou can change what effect is being used by setting\n:attr:`ScrollView.effect_cls` to any effect class. Current options\ninclude:\n\n - :class:`~kivy.effects.scroll.ScrollEffect`: Does not allow\n scrolling beyond the :class:`ScrollView` boundaries.\n - :class:`~kivy.effects.dampedscroll.DampedScrollEffect`: The\n current default. 
Allows the user to scroll beyond the normal\n boundaries, but has the content spring back once the\n touch/click is released.\n - :class:`~kivy.effects.opacityscroll.OpacityScrollEffect`: Similar\n to the :class:`~kivy.effect.dampedscroll.DampedScrollEffect`, but\n also reduces opacity during overscroll.\n\nYou can also create your own scroll effect by subclassing one of these,\nthen pass it as the :attr:`~ScrollView.effect_cls` in the same way.\n\nAlternatively, you can set :attr:`ScrollView.effect_x` and/or\n:attr:`ScrollView.effect_y` to an *instance* of the effect you want to\nuse. This will override the default effect set in\n:attr:`ScrollView.effect_cls`.\n\nAll the effects are located in the :mod:`kivy.effects`.\n\n'''\n\n__all__ = ('ScrollView', )\n\nfrom functools import partial\nfrom kivy.animation import Animation\nfrom kivy.compat import string_types\nfrom kivy.config import Config\nfrom kivy.clock import Clock\nfrom kivy.factory import Factory\nfrom kivy.uix.stencilview import StencilView\nfrom kivy.metrics import sp\nfrom kivy.effects.dampedscroll import DampedScrollEffect\nfrom kivy.properties import NumericProperty, BooleanProperty, AliasProperty, \\\n ObjectProperty, ListProperty, ReferenceListProperty, OptionProperty\n\n\n# When we are generating documentation, Config doesn't exist\n_scroll_timeout = _scroll_distance = 0\nif Config:\n _scroll_timeout = Config.getint('widgets', 'scroll_timeout')\n _scroll_distance = sp(Config.getint('widgets', 'scroll_distance'))\n\n\nclass ScrollView(StencilView):\n '''ScrollView class. See module documentation for more information.\n\n .. versionchanged:: 1.7.0\n `auto_scroll`, `scroll_friction`, `scroll_moves`, `scroll_stoptime' has\n been deprecated, use :attr:`effect_cls` instead.\n '''\n\n scroll_distance = NumericProperty(_scroll_distance)\n '''Distance to move before scrolling the :class:`ScrollView`, in pixels. As\n soon as the distance has been traveled, the :class:`ScrollView` will start\n to scroll, and no touch event will go to children.\n It is advisable that you base this value on the dpi of your target device's\n screen.\n\n :attr:`scroll_distance` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 20 (pixels), according to the default value in user\n configuration.\n '''\n\n scroll_wheel_distance = NumericProperty(20)\n '''Distance to move when scrolling with a mouse wheel.\n It is advisable that you base this value on the dpi of your target device's\n screen.\n\n .. versionadded:: 1.8.0\n\n :attr:`scroll_wheel_distance` is a\n :class:`~kivy.properties.NumericProperty` , defaults to 20 pixels.\n '''\n\n scroll_timeout = NumericProperty(_scroll_timeout)\n '''Timeout allowed to trigger the :attr:`scroll_distance`, in milliseconds.\n If the user has not moved :attr:`scroll_distance` within the timeout,\n the scrolling will be disabled, and the touch event will go to the\n children.\n\n :attr:`scroll_timeout` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 55 (milliseconds) according to the default value in user\n configuration.\n\n .. versionchanged:: 1.5.0\n Default value changed from 250 to 55.\n '''\n\n scroll_x = NumericProperty(0.)\n '''X scrolling value, between 0 and 1. If 0, the content's left side will\n touch the left side of the ScrollView. 
If 1, the content's right side will\n touch the right side.\n\n This property is controled by :class:`ScrollView` only if\n :attr:`do_scroll_x` is True.\n\n :attr:`scroll_x` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 0.\n '''\n\n scroll_y = NumericProperty(1.)\n '''Y scrolling value, between 0 and 1. If 0, the content's bottom side will\n touch the bottom side of the ScrollView. If 1, the content's top side will\n touch the top side.\n\n This property is controled by :class:`ScrollView` only if\n :attr:`do_scroll_y` is True.\n\n :attr:`scroll_y` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 1.\n '''\n\n do_scroll_x = BooleanProperty(True)\n '''Allow scroll on X axis.\n\n :attr:`do_scroll_x` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to True.\n '''\n\n do_scroll_y = BooleanProperty(True)\n '''Allow scroll on Y axis.\n\n :attr:`do_scroll_y` is a :class:`~kivy.properties.BooleanProperty` and\n defaults to True.\n '''\n\n def _get_do_scroll(self):\n return (self.do_scroll_x, self.do_scroll_y)\n\n def _set_do_scroll(self, value):\n if type(value) in (list, tuple):\n self.do_scroll_x, self.do_scroll_y = value\n else:\n self.do_scroll_x = self.do_scroll_y = bool(value)\n do_scroll = AliasProperty(_get_do_scroll, _set_do_scroll,\n bind=('do_scroll_x', 'do_scroll_y'))\n '''Allow scroll on X or Y axis.\n\n :attr:`do_scroll` is a :class:`~kivy.properties.AliasProperty` of\n (:attr:`do_scroll_x` + :attr:`do_scroll_y`)\n '''\n\n def _get_vbar(self):\n # must return (y, height) in %\n # calculate the viewport size / scrollview size %\n if self._viewport is None:\n return 0, 1.\n vh = self._viewport.height\n h = self.height\n if vh < h or vh == 0:\n return 0, 1.\n ph = max(0.01, h / float(vh))\n sy = min(1.0, max(0.0, self.scroll_y))\n py = (1. - ph) * sy\n return (py, ph)\n\n vbar = AliasProperty(_get_vbar, None, bind=(\n 'scroll_y', '_viewport', 'viewport_size'))\n '''Return a tuple of (position, size) of the vertical scrolling bar.\n\n .. versionadded:: 1.2.0\n\n The position and size are normalized between 0-1, and represent a\n percentage of the current scrollview height. This property is used\n internally for drawing the little vertical bar when you're scrolling.\n\n :attr:`vbar` is a :class:`~kivy.properties.AliasProperty`, readonly.\n '''\n\n def _get_hbar(self):\n # must return (x, width) in %\n # calculate the viewport size / scrollview size %\n if self._viewport is None:\n return 0, 1.\n vw = self._viewport.width\n w = self.width\n if vw < w or vw == 0:\n return 0, 1.\n pw = max(0.01, w / float(vw))\n sx = min(1.0, max(0.0, self.scroll_x))\n px = (1. - pw) * sx\n return (px, pw)\n\n hbar = AliasProperty(_get_hbar, None, bind=(\n 'scroll_x', '_viewport', 'viewport_size'))\n '''Return a tuple of (position, size) of the horizontal scrolling bar.\n\n .. versionadded:: 1.2.0\n\n The position and size are normalized between 0-1, and represent a\n percentage of the current scrollview height. This property is used\n internally for drawing the little horizontal bar when you're scrolling.\n\n :attr:`vbar` is a :class:`~kivy.properties.AliasProperty`, readonly.\n '''\n\n bar_color = ListProperty([.7, .7, .7, .9])\n '''Color of horizontal / vertical scroll bar, in RGBA format.\n\n .. 
versionadded:: 1.2.0\n\n :attr:`bar_color` is a :class:`~kivy.properties.ListProperty` and defaults\n to [.7, .7, .7, .9].\n '''\n\n bar_inactive_color = ListProperty([.7, .7, .7, .2])\n '''Color of horizontal / vertical scroll bar (in RGBA format), when no\n scroll is happening.\n\n .. versionadded:: 1.9.0\n\n :attr:`bar_inactive_color` is a\n :class:`~kivy.properties.ListProperty` and defaults to [.7, .7, .7, .2].\n '''\n\n bar_width = NumericProperty('2dp')\n '''Width of the horizontal / vertical scroll bar. The width is interpreted\n as a height for the horizontal bar.\n\n .. versionadded:: 1.2.0\n\n :attr:`bar_width` is a :class:`~kivy.properties.NumericProperty` and\n defaults to 2.\n '''\n\n bar_pos_x = OptionProperty('bottom', options=('top', 'bottom'))\n '''Which side of the ScrollView the horizontal scroll bar should go\n on. Possible values are 'top' and 'bottom'.\n\n .. versionadded:: 1.8.0\n\n :attr:`bar_pos_x` is an :class:`~kivy.properties.OptionProperty`,\n default to 'bottom'\n\n '''\n\n bar_pos_y = OptionProperty('right', options=('left', 'right'))\n '''Which side of the ScrollView the vertical scroll bar should go\n on. Possible values are 'left' and 'right'.\n\n .. versionadded:: 1.8.0\n\n :attr:`bar_pos_y` is an :class:`~kivy.properties.OptionProperty`,\n default to 'right'\n\n '''\n\n bar_pos = ReferenceListProperty(bar_pos_x, bar_pos_y)\n '''Which side of the scroll view to place each of the bars on.\n\n :attr:`bar_pos` is a :class:`~kivy.properties.ReferenceListProperty` of\n (:attr:`bar_pos_x`, :attr:`bar_pos_y`)\n '''\n\n bar_margin = NumericProperty(0)\n '''Margin between the bottom / right side of the scrollview when drawing\n the horizontal / vertical scroll bar.\n\n .. versionadded:: 1.2.0\n\n :attr:`bar_margin` is a :class:`~kivy.properties.NumericProperty`, default\n to 0\n '''\n\n effect_cls = ObjectProperty(DampedScrollEffect, allownone=True)\n '''Class effect to instanciate for X and Y axis.\n\n .. versionadded:: 1.7.0\n\n :attr:`effect_cls` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to :class:`DampedScrollEffect`.\n\n .. versionchanged:: 1.8.0\n If you set a string, the :class:`~kivy.factory.Factory` will be used to\n resolve the class.\n\n '''\n\n effect_x = ObjectProperty(None, allownone=True)\n '''Effect to apply for the X axis. If None is set, an instance of\n :attr:`effect_cls` will be created.\n\n .. versionadded:: 1.7.0\n\n :attr:`effect_x` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to None.\n '''\n\n effect_y = ObjectProperty(None, allownone=True)\n '''Effect to apply for the Y axis. If None is set, an instance of\n :attr:`effect_cls` will be created.\n\n .. versionadded:: 1.7.0\n\n :attr:`effect_y` is an :class:`~kivy.properties.ObjectProperty` and\n defaults to None, read-only.\n '''\n\n viewport_size = ListProperty([0, 0])\n '''(internal) Size of the internal viewport. This is the size of your only\n child in the scrollview.\n '''\n\n scroll_type = OptionProperty(['content'], options=(['content'], ['bars'],\n ['bars', 'content'], ['content', 'bars']))\n '''Sets the type of scrolling to use for the content of the scrollview.\n Available options are: ['content'], ['bars'], ['bars', 'content'].\n\n .. 
versionadded:: 1.8.0\n\n :attr:`scroll_type` is a :class:`~kivy.properties.OptionProperty`, defaults\n to ['content'].\n '''\n\n # private, for internal use only\n\n _viewport = ObjectProperty(None, allownone=True)\n _bar_color = ListProperty([0, 0, 0, 0])\n\n def _set_viewport_size(self, instance, value):\n self.viewport_size = value\n\n def on__viewport(self, instance, value):\n if value:\n value.bind(size=self._set_viewport_size)\n self.viewport_size = value.size\n\n def __init__(self, **kwargs):\n self._touch = None\n self._trigger_update_from_scroll = Clock.create_trigger(\n self.update_from_scroll, -1)\n # create a specific canvas for the viewport\n from kivy.graphics import PushMatrix, Translate, PopMatrix, Canvas\n self.canvas_viewport = Canvas()\n self.canvas = Canvas()\n with self.canvas_viewport.before:\n PushMatrix()\n self.g_translate = Translate(0, 0)\n with self.canvas_viewport.after:\n PopMatrix()\n\n super(ScrollView, self).__init__(**kwargs)\n\n self.register_event_type('on_scroll_start')\n self.register_event_type('on_scroll_move')\n self.register_event_type('on_scroll_stop')\n\n # now add the viewport canvas to our canvas\n self.canvas.add(self.canvas_viewport)\n\n effect_cls = self.effect_cls\n if isinstance(effect_cls, string_types):\n effect_cls = Factory.get(effect_cls)\n if self.effect_x is None and effect_cls is not None:\n self.effect_x = effect_cls(target_widget=self._viewport)\n if self.effect_y is None and effect_cls is not None:\n self.effect_y = effect_cls(target_widget=self._viewport)\n self.bind(\n width=self._update_effect_x_bounds,\n height=self._update_effect_y_bounds,\n viewport_size=self._update_effect_bounds,\n _viewport=self._update_effect_widget,\n scroll_x=self._trigger_update_from_scroll,\n scroll_y=self._trigger_update_from_scroll,\n pos=self._trigger_update_from_scroll,\n size=self._trigger_update_from_scroll)\n\n self._update_effect_widget()\n self._update_effect_x_bounds()\n self._update_effect_y_bounds()\n\n def on_effect_x(self, instance, value):\n if value:\n value.bind(scroll=self._update_effect_x)\n value.target_widget = self._viewport\n\n def on_effect_y(self, instance, value):\n if value:\n value.bind(scroll=self._update_effect_y)\n value.target_widget = self._viewport\n\n def on_effect_cls(self, instance, cls):\n if isinstance(cls, string_types):\n cls = Factory.get(cls)\n self.effect_x = cls(target_widget=self._viewport)\n self.effect_x.bind(scroll=self._update_effect_x)\n self.effect_y = cls(target_widget=self._viewport)\n self.effect_y.bind(scroll=self._update_effect_y)\n\n def _update_effect_widget(self, *args):\n if self.effect_x:\n self.effect_x.target_widget = self._viewport\n if self.effect_y:\n self.effect_y.target_widget = self._viewport\n\n def _update_effect_x_bounds(self, *args):\n if not self._viewport or not self.effect_x:\n return\n self.effect_x.min = -(self.viewport_size[0] - self.width)\n self.effect_x.max = 0\n self.effect_x.value = self.effect_x.min * self.scroll_x\n\n def _update_effect_y_bounds(self, *args):\n if not self._viewport or not self.effect_y:\n return\n self.effect_y.min = -(self.viewport_size[1] - self.height)\n self.effect_y.max = 0\n self.effect_y.value = self.effect_y.min * self.scroll_y\n\n def _update_effect_bounds(self, *args):\n if not self._viewport:\n return\n if self.effect_x:\n self._update_effect_x_bounds()\n if self.effect_y:\n self._update_effect_y_bounds()\n\n def _update_effect_x(self, *args):\n vp = self._viewport\n if not vp or not self.effect_x:\n return\n sw = vp.width - 
self.width\n if sw < 1:\n return\n sx = self.effect_x.scroll / float(sw)\n self.scroll_x = -sx\n self._trigger_update_from_scroll()\n\n def _update_effect_y(self, *args):\n vp = self._viewport\n if not vp or not self.effect_y:\n return\n sh = vp.height - self.height\n if sh < 1:\n return\n sy = self.effect_y.scroll / float(sh)\n self.scroll_y = -sy\n self._trigger_update_from_scroll()\n\n def to_local(self, x, y, **k):\n tx, ty = self.g_translate.xy\n return x - tx, y - ty\n\n def to_parent(self, x, y, **k):\n tx, ty = self.g_translate.xy\n return x + tx, y + ty\n\n def simulate_touch_down(self, touch):\n # at this point the touch is in parent coords\n touch.push()\n touch.apply_transform_2d(self.to_local)\n ret = super(ScrollView, self).on_touch_down(touch)\n touch.pop()\n return ret\n\n def on_touch_down(self, touch):\n if self.dispatch('on_scroll_start', touch):\n self._touch = touch\n touch.grab(self)\n return True\n\n def on_scroll_start(self, touch, check_children=True):\n if check_children:\n touch.push()\n touch.apply_transform_2d(self.to_local)\n if self.dispatch_children('on_scroll_start', touch):\n return True\n touch.pop()\n\n if not self.collide_point(*touch.pos):\n touch.ud[self._get_uid('svavoid')] = True\n return\n if self.disabled:\n return True\n if self._touch or (not (self.do_scroll_x or self.do_scroll_y)):\n return self.simulate_touch_down(touch)\n\n # handle mouse scrolling, only if the viewport size is bigger than the\n # scrollview size, and if the user allowed to do it\n vp = self._viewport\n if not vp:\n return True\n scroll_type = self.scroll_type\n ud = touch.ud\n scroll_bar = 'bars' in scroll_type\n\n # check if touch is in bar_x(horizontal) or bay_y(bertical)\n ud['in_bar_x'] = ud['in_bar_y'] = False\n width_scrollable = vp.width > self.width\n height_scrollable = vp.height > self.height\n bar_pos_x = self.bar_pos_x[0]\n bar_pos_y = self.bar_pos_y[0]\n\n d = {'b': True if touch.y < self.y + self.bar_width else False,\n 't': True if touch.y > self.top - self.bar_width else False,\n 'l': True if touch.x < self.x + self.bar_width else False,\n 'r': True if touch.x > self.right - self.bar_width else False}\n if scroll_bar:\n if (width_scrollable and d[bar_pos_x]):\n ud['in_bar_x'] = True\n if (height_scrollable and d[bar_pos_y]):\n ud['in_bar_y'] = True\n\n if vp and 'button' in touch.profile and \\\n touch.button.startswith('scroll'):\n btn = touch.button\n m = sp(self.scroll_wheel_distance)\n e = None\n\n if ((btn == 'scrolldown' and self.scroll_y >= 1) or\n (btn == 'scrollup' and self.scroll_y <= 0) or\n (btn == 'scrollleft' and self.scroll_x <= 0) or\n (btn == 'scrollright' and self.scroll_x >= 1)):\n return False\n\n if (self.effect_x and self.do_scroll_y and height_scrollable\n and btn in ('scrolldown', 'scrollup')):\n e = self.effect_x if ud['in_bar_x'] else self.effect_y\n\n elif (self.effect_y and self.do_scroll_x and width_scrollable\n and btn in ('scrollleft', 'scrollright')):\n e = self.effect_y if ud['in_bar_y'] else self.effect_x\n\n if e:\n if btn in ('scrolldown', 'scrollleft'):\n e.value = max(e.value - m, e.min)\n e.velocity = 0\n elif btn in ('scrollup', 'scrollright'):\n e.value = min(e.value + m, e.max)\n e.velocity = 0\n touch.ud[self._get_uid('svavoid')] = True\n e.trigger_velocity_update()\n return True\n\n # no mouse scrolling, so the user is going to drag the scrollview with\n # this touch.\n self._touch = touch\n uid = self._get_uid()\n\n ud[uid] = {\n 'mode': 'unknown',\n 'dx': 0,\n 'dy': 0,\n 'user_stopped': False,\n 'frames': 
Clock.frames,\n 'time': touch.time_start}\n\n if self.do_scroll_x and self.effect_x and not ud['in_bar_x']:\n self.effect_x.start(touch.x)\n self._scroll_x_mouse = self.scroll_x\n if self.do_scroll_y and self.effect_y and not ud['in_bar_y']:\n self.effect_y.start(touch.y)\n self._scroll_y_mouse = self.scroll_y\n\n if (ud.get('in_bar_x', False) or ud.get('in_bar_y', False)):\n return True\n if scroll_type == ['bars']:\n # touch is in parent, but _change_touch_mode expects window coords\n touch.push()\n touch.apply_transform_2d(self.to_local)\n touch.apply_transform_2d(self.to_window)\n self._change_touch_mode()\n touch.pop()\n return False\n else:\n Clock.schedule_once(self._change_touch_mode,\n self.scroll_timeout / 1000.)\n return True\n\n def on_touch_move(self, touch):\n if self._touch is not touch:\n # touch is in parent\n touch.push()\n touch.apply_transform_2d(self.to_local)\n super(ScrollView, self).on_touch_move(touch)\n touch.pop()\n return self._get_uid() in touch.ud\n if touch.grab_current is not self:\n return True\n\n touch.ud['sv.handled'] = {'x': False, 'y': False}\n if self.dispatch('on_scroll_move', touch):\n return True\n\n def on_scroll_move(self, touch):\n if self._get_uid('svavoid') in touch.ud:\n return False\n\n touch.push()\n touch.apply_transform_2d(self.to_local)\n if self.dispatch_children('on_scroll_move', touch):\n return True\n touch.pop()\n\n rv = True\n\n uid = self._get_uid()\n if not uid in touch.ud:\n self._touch = False\n return self.on_scroll_start(touch, False)\n ud = touch.ud[uid]\n mode = ud['mode']\n\n # check if the minimum distance has been travelled\n if mode == 'unknown' or mode == 'scroll':\n if not touch.ud['sv.handled']['x'] and self.do_scroll_x \\\n and self.effect_x:\n width = self.width\n if touch.ud.get('in_bar_x', False):\n dx = touch.dx / float(width - width * self.hbar[1])\n self.scroll_x = min(max(self.scroll_x + dx, 0.), 1.)\n self._trigger_update_from_scroll()\n else:\n if self.scroll_type != ['bars']:\n self.effect_x.update(touch.x)\n if self.scroll_x < 0 or self.scroll_x > 1:\n rv = False\n else:\n touch.ud['sv.handled']['x'] = True\n if not touch.ud['sv.handled']['y'] and self.do_scroll_y \\\n and self.effect_y:\n height = self.height\n if touch.ud.get('in_bar_y', False):\n dy = touch.dy / float(height - height * self.vbar[1])\n self.scroll_y = min(max(self.scroll_y + dy, 0.), 1.)\n self._trigger_update_from_scroll()\n else:\n if self.scroll_type != ['bars']:\n self.effect_y.update(touch.y)\n if self.scroll_y < 0 or self.scroll_y > 1:\n rv = False\n else:\n touch.ud['sv.handled']['y'] = True\n\n if mode == 'unknown':\n ud['dx'] += abs(touch.dx)\n ud['dy'] += abs(touch.dy)\n if ud['dx'] > self.scroll_distance:\n if not self.do_scroll_x:\n # touch is in parent, but _change expects window coords\n touch.push()\n touch.apply_transform_2d(self.to_local)\n touch.apply_transform_2d(self.to_window)\n self._change_touch_mode()\n touch.pop()\n return\n mode = 'scroll'\n\n if ud['dy'] > self.scroll_distance:\n if not self.do_scroll_y:\n # touch is in parent, but _change expects window coords\n touch.push()\n touch.apply_transform_2d(self.to_local)\n touch.apply_transform_2d(self.to_window)\n self._change_touch_mode()\n touch.pop()\n return\n mode = 'scroll'\n ud['mode'] = mode\n\n if mode == 'scroll':\n ud['dt'] = touch.time_update - ud['time']\n ud['time'] = touch.time_update\n ud['user_stopped'] = True\n\n return rv\n\n def on_touch_up(self, touch):\n if self._touch is not touch and self.uid not in touch.ud:\n # touch is in parents\n 
touch.push()\n touch.apply_transform_2d(self.to_local)\n if super(ScrollView, self).on_touch_up(touch):\n return True\n touch.pop()\n return False\n\n if self.dispatch('on_scroll_stop', touch):\n touch.ungrab(self)\n return True\n\n def on_scroll_stop(self, touch, check_children=True):\n self._touch = None\n\n if check_children:\n touch.push()\n touch.apply_transform_2d(self.to_local)\n if self.dispatch_children('on_scroll_stop', touch):\n return True\n touch.pop()\n\n if self._get_uid('svavoid') in touch.ud:\n return\n if self._get_uid() not in touch.ud:\n return False\n\n self._touch = None\n uid = self._get_uid()\n ud = touch.ud[uid]\n if self.do_scroll_x and self.effect_x:\n if not touch.ud.get('in_bar_x', False) and\\\n self.scroll_type != ['bars']:\n self.effect_x.stop(touch.x)\n if self.do_scroll_y and self.effect_y and\\\n self.scroll_type != ['bars']:\n if not touch.ud.get('in_bar_y', False):\n self.effect_y.stop(touch.y)\n if ud['mode'] == 'unknown':\n # we must do the click at least..\n # only send the click if it was not a click to stop\n # autoscrolling\n if not ud['user_stopped']:\n self.simulate_touch_down(touch)\n Clock.schedule_once(partial(self._do_touch_up, touch), .2)\n Clock.unschedule(self._update_effect_bounds)\n Clock.schedule_once(self._update_effect_bounds)\n\n # if we do mouse scrolling, always accept it\n if 'button' in touch.profile and touch.button.startswith('scroll'):\n return True\n\n return self._get_uid() in touch.ud\n\n def convert_distance_to_scroll(self, dx, dy):\n '''Convert a distance in pixels to a scroll distance, depending on the\n content size and the scrollview size.\n\n The result will be a tuple of scroll distance that can be added to\n :data:`scroll_x` and :data:`scroll_y`\n '''\n if not self._viewport:\n return 0, 0\n vp = self._viewport\n if vp.width > self.width:\n sw = vp.width - self.width\n sx = dx / float(sw)\n else:\n sx = 0\n if vp.height > self.height:\n sh = vp.height - self.height\n sy = dy / float(sh)\n else:\n sy = 1\n return sx, sy\n\n def update_from_scroll(self, *largs):\n '''Force the reposition of the content, according to current value of\n :attr:`scroll_x` and :attr:`scroll_y`.\n\n This method is automatically called when one of the :attr:`scroll_x`,\n :attr:`scroll_y`, :attr:`pos` or :attr:`size` properties change, or\n if the size of the content changes.\n '''\n if not self._viewport:\n return\n vp = self._viewport\n\n # update from size_hint\n if vp.size_hint_x is not None:\n vp.width = vp.size_hint_x * self.width\n if vp.size_hint_y is not None:\n vp.height = vp.size_hint_y * self.height\n\n if vp.width > self.width:\n sw = vp.width - self.width\n x = self.x - self.scroll_x * sw\n else:\n x = self.x\n if vp.height > self.height:\n sh = vp.height - self.height\n y = self.y - self.scroll_y * sh\n else:\n y = self.top - vp.height\n\n # from 1.8.0, we now use a matrix by default, instead of moving the\n # widget position behind. 
We set it here, but it will be a no-op most of\n # the time.\n vp.pos = 0, 0\n self.g_translate.xy = x, y\n\n # New in 1.2.0, show bar when scrolling happens and (changed in 1.9.0)\n # fade to bar_inactive_color when no scroll is happening.\n Clock.unschedule(self._bind_inactive_bar_color)\n self.unbind(bar_inactive_color=self._change_bar_color)\n Animation.stop_all(self, '_bar_color')\n self.bind(bar_color=self._change_bar_color)\n self._bar_color = self.bar_color\n Clock.schedule_once(self._bind_inactive_bar_color, .5)\n\n def _bind_inactive_bar_color(self, *l):\n self.unbind(bar_color=self._change_bar_color)\n self.bind(bar_inactive_color=self._change_bar_color)\n Animation(\n _bar_color=self.bar_inactive_color, d=.5, t='out_quart').start(self)\n\n def _change_bar_color(self, inst, value):\n self._bar_color = value\n\n #\n # Private\n #\n def add_widget(self, widget, index=0):\n if self._viewport:\n raise Exception('ScrollView accept only one widget')\n canvas = self.canvas\n self.canvas = self.canvas_viewport\n super(ScrollView, self).add_widget(widget, index)\n self.canvas = canvas\n self._viewport = widget\n widget.bind(size=self._trigger_update_from_scroll)\n self._trigger_update_from_scroll()\n\n def remove_widget(self, widget):\n canvas = self.canvas\n self.canvas = self.canvas_viewport\n super(ScrollView, self).remove_widget(widget)\n self.canvas = canvas\n if widget is self._viewport:\n self._viewport = None\n\n def _get_uid(self, prefix='sv'):\n return '{0}.{1}'.format(prefix, self.uid)\n\n def _change_touch_mode(self, *largs):\n if not self._touch:\n return\n uid = self._get_uid()\n touch = self._touch\n ud = touch.ud[uid]\n if ud['mode'] != 'unknown' or ud['user_stopped']:\n return\n diff_frames = Clock.frames - ud['frames']\n\n # in order to be able to scroll on very slow devices, let at least 3\n # frames displayed to accumulate some velocity. And then, change the\n # touch mode. Otherwise, we might never be able to compute velocity, and\n # no way to scroll it. See #1464 and #1499\n if diff_frames < 3:\n Clock.schedule_once(self._change_touch_mode, 0)\n return\n\n if self.do_scroll_x and self.effect_x:\n self.effect_x.cancel()\n if self.do_scroll_y and self.effect_y:\n self.effect_y.cancel()\n # XXX the next line was in the condition. 
But this stop\n # the possibily to \"drag\" an object out of the scrollview in the\n # non-used direction: if you have an horizontal scrollview, a\n # vertical gesture will not \"stop\" the scroll view to look for an\n # horizontal gesture, until the timeout is done.\n # and touch.dx + touch.dy == 0:\n touch.ungrab(self)\n self._touch = None\n # touch is in window coords\n touch.push()\n touch.apply_transform_2d(self.to_widget)\n touch.apply_transform_2d(self.to_parent)\n self.simulate_touch_down(touch)\n touch.pop()\n return\n\n def _do_touch_up(self, touch, *largs):\n # touch is in window coords\n touch.push()\n touch.apply_transform_2d(self.to_widget)\n super(ScrollView, self).on_touch_up(touch)\n touch.pop()\n # don't forget about grab event!\n for x in touch.grab_list[:]:\n touch.grab_list.remove(x)\n x = x()\n if not x:\n continue\n touch.grab_current = x\n # touch is in window coords\n touch.push()\n touch.apply_transform_2d(self.to_widget)\n super(ScrollView, self).on_touch_up(touch)\n touch.pop()\n touch.grab_current = None\n\n\nif __name__ == '__main__':\n from kivy.app import App\n\n from kivy.uix.gridlayout import GridLayout\n from kivy.uix.button import Button\n\n class ScrollViewApp(App):\n\n def build(self):\n layout1 = GridLayout(cols=4, spacing=10, size_hint=(None, None))\n layout1.bind(minimum_height=layout1.setter('height'),\n minimum_width=layout1.setter('width'))\n for i in range(40):\n btn = Button(text=str(i), size_hint=(None, None),\n size=(200, 100))\n layout1.add_widget(btn)\n scrollview1 = ScrollView(bar_width='2dp')\n scrollview1.add_widget(layout1)\n\n layout2 = GridLayout(cols=4, spacing=10, size_hint=(None, None))\n layout2.bind(minimum_height=layout2.setter('height'),\n minimum_width=layout2.setter('width'))\n for i in range(40):\n btn = Button(text=str(i), size_hint=(None, None),\n size=(200, 100))\n layout2.add_widget(btn)\n scrollview2 = ScrollView(scroll_type=['bars'],\n bar_width='9dp',\n scroll_wheel_distance=100)\n scrollview2.add_widget(layout2)\n\n root = GridLayout(cols=2)\n root.add_widget(scrollview1)\n root.add_widget(scrollview2)\n return root\n\n ScrollViewApp().run()\n",
"path": "kivy/uix/scrollview.py"
}
] | diff --git a/kivy/uix/scrollview.py b/kivy/uix/scrollview.py
index f2bd288144..155607b052 100644
--- a/kivy/uix/scrollview.py
+++ b/kivy/uix/scrollview.py
@@ -644,7 +644,7 @@ def on_scroll_start(self, touch, check_children=True):
self._scroll_y_mouse = self.scroll_y
if (ud.get('in_bar_x', False) or ud.get('in_bar_y', False)):
- return
+ return True
if scroll_type == ['bars']:
# touch is in parent, but _change_touch_mode expects window coords
touch.push()
| Inspector property list scrolling selects an underlying widget
While scrolling with the mouse wheel through the property list of a selected widget, a different widget (one underneath the property list) gets selected once the top or the bottom of the list is reached.
The same happens while trying to drag the view with the scrollbar.
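For context on why the one-line diff above is sufficient: in Kivy's event dispatch, a truthy return value from a touch handler marks the touch as consumed, while returning `None` lets it fall through to whatever widget sits underneath (here, the widget below the Inspector's property list). The snippet below is only a minimal sketch of that convention, not part of the patch; the widget class and comments are illustrative.

```
from kivy.uix.widget import Widget


class ClaimingWidget(Widget):
    def on_touch_down(self, touch):
        if self.collide_point(*touch.pos):
            # Returning True tells the dispatcher the touch was handled,
            # so widgets stacked below this one never receive it.
            return True
        # Otherwise fall back to the default behaviour and let the
        # touch keep propagating.
        return super(ClaimingWidget, self).on_touch_down(touch)
```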
|
encode__httpx-721 | [
{
"content": "import typing\n\nfrom ..backends.base import BaseSemaphore, ConcurrencyBackend, lookup_backend\nfrom ..config import (\n DEFAULT_POOL_LIMITS,\n CertTypes,\n PoolLimits,\n SSLConfig,\n Timeout,\n VerifyTypes,\n)\nfrom ..exceptions import PoolTimeout\nfrom ..models import Origin, Request, Response\nfrom ..utils import get_logger\nfrom .base import Dispatcher\nfrom .connection import HTTPConnection\n\nCONNECTIONS_DICT = typing.Dict[Origin, typing.List[HTTPConnection]]\n\n\nlogger = get_logger(__name__)\n\n\nclass NullSemaphore(BaseSemaphore):\n async def acquire(self, timeout: float = None) -> None:\n return\n\n def release(self) -> None:\n return\n\n\nclass ConnectionStore:\n \"\"\"\n We need to maintain collections of connections in a way that allows us to:\n\n * Lookup connections by origin.\n * Iterate over connections by insertion time.\n * Return the total number of connections.\n \"\"\"\n\n def __init__(self) -> None:\n self.all: typing.Dict[HTTPConnection, float] = {}\n self.by_origin: typing.Dict[Origin, typing.Dict[HTTPConnection, float]] = {}\n\n def pop_by_origin(\n self, origin: Origin, http2_only: bool = False\n ) -> typing.Optional[HTTPConnection]:\n try:\n connections = self.by_origin[origin]\n except KeyError:\n return None\n\n connection = next(reversed(list(connections.keys())))\n if http2_only and not connection.is_http2:\n return None\n\n del connections[connection]\n if not connections:\n del self.by_origin[origin]\n del self.all[connection]\n\n return connection\n\n def add(self, connection: HTTPConnection) -> None:\n self.all[connection] = 0.0\n try:\n self.by_origin[connection.origin][connection] = 0.0\n except KeyError:\n self.by_origin[connection.origin] = {connection: 0.0}\n\n def remove(self, connection: HTTPConnection) -> None:\n del self.all[connection]\n del self.by_origin[connection.origin][connection]\n if not self.by_origin[connection.origin]:\n del self.by_origin[connection.origin]\n\n def clear(self) -> None:\n self.all.clear()\n self.by_origin.clear()\n\n def __iter__(self) -> typing.Iterator[HTTPConnection]:\n return iter(self.all.keys())\n\n def __len__(self) -> int:\n return len(self.all)\n\n\nclass ConnectionPool(Dispatcher):\n KEEP_ALIVE_EXPIRY = 5.0\n\n def __init__(\n self,\n *,\n verify: VerifyTypes = True,\n cert: CertTypes = None,\n trust_env: bool = None,\n pool_limits: PoolLimits = DEFAULT_POOL_LIMITS,\n http2: bool = False,\n backend: typing.Union[str, ConcurrencyBackend] = \"auto\",\n uds: typing.Optional[str] = None,\n ):\n self.ssl = SSLConfig(verify=verify, cert=cert, trust_env=trust_env, http2=http2)\n self.pool_limits = pool_limits\n self.is_closed = False\n self.uds = uds\n\n self.keepalive_connections = ConnectionStore()\n self.active_connections = ConnectionStore()\n\n self.backend = lookup_backend(backend)\n self.next_keepalive_check = 0.0\n\n @property\n def max_connections(self) -> BaseSemaphore:\n # We do this lazily, to make sure backend autodetection always\n # runs within an async context.\n if not hasattr(self, \"_max_connections\"):\n limit = self.pool_limits.hard_limit\n if limit:\n self._max_connections = self.backend.create_semaphore(\n limit, exc_class=PoolTimeout\n )\n else:\n self._max_connections = NullSemaphore()\n\n return self._max_connections\n\n @property\n def num_connections(self) -> int:\n return len(self.keepalive_connections) + len(self.active_connections)\n\n async def check_keepalive_expiry(self) -> None:\n now = self.backend.time()\n if now < self.next_keepalive_check:\n return\n 
self.next_keepalive_check = now + 1.0\n\n # Iterate through all the keep alive connections.\n # We create a list here to avoid any 'changed during iteration' errors.\n keepalives = list(self.keepalive_connections.all.keys())\n for connection in keepalives:\n if connection.expires_at is not None and now > connection.expires_at:\n self.keepalive_connections.remove(connection)\n self.max_connections.release()\n await connection.close()\n\n async def send(self, request: Request, timeout: Timeout = None) -> Response:\n await self.check_keepalive_expiry()\n connection = await self.acquire_connection(\n origin=request.url.origin, timeout=timeout\n )\n try:\n response = await connection.send(request, timeout=timeout)\n except BaseException as exc:\n self.active_connections.remove(connection)\n self.max_connections.release()\n raise exc\n\n return response\n\n async def acquire_connection(\n self, origin: Origin, timeout: Timeout = None\n ) -> HTTPConnection:\n logger.trace(f\"acquire_connection origin={origin!r}\")\n connection = self.pop_connection(origin)\n\n if connection is None:\n pool_timeout = None if timeout is None else timeout.pool_timeout\n\n await self.max_connections.acquire(timeout=pool_timeout)\n connection = HTTPConnection(\n origin,\n ssl=self.ssl,\n backend=self.backend,\n release_func=self.release_connection,\n uds=self.uds,\n )\n logger.trace(f\"new_connection connection={connection!r}\")\n else:\n logger.trace(f\"reuse_connection connection={connection!r}\")\n\n self.active_connections.add(connection)\n\n return connection\n\n async def release_connection(self, connection: HTTPConnection) -> None:\n logger.trace(f\"release_connection connection={connection!r}\")\n if connection.is_closed:\n self.active_connections.remove(connection)\n self.max_connections.release()\n elif (\n self.pool_limits.soft_limit is not None\n and self.num_connections > self.pool_limits.soft_limit\n ):\n self.active_connections.remove(connection)\n self.max_connections.release()\n await connection.close()\n else:\n now = self.backend.time()\n connection.expires_at = now + self.KEEP_ALIVE_EXPIRY\n self.active_connections.remove(connection)\n self.keepalive_connections.add(connection)\n\n async def close(self) -> None:\n self.is_closed = True\n connections = list(self.keepalive_connections)\n self.keepalive_connections.clear()\n for connection in connections:\n await connection.close()\n\n def pop_connection(self, origin: Origin) -> typing.Optional[HTTPConnection]:\n connection = self.active_connections.pop_by_origin(origin, http2_only=True)\n if connection is None:\n connection = self.keepalive_connections.pop_by_origin(origin)\n\n if connection is not None and connection.is_connection_dropped():\n self.max_connections.release()\n connection = None\n\n return connection\n",
"path": "httpx/dispatch/connection_pool.py"
}
] | [
{
"content": "import typing\n\nfrom ..backends.base import BaseSemaphore, ConcurrencyBackend, lookup_backend\nfrom ..config import (\n DEFAULT_POOL_LIMITS,\n CertTypes,\n PoolLimits,\n SSLConfig,\n Timeout,\n VerifyTypes,\n)\nfrom ..exceptions import PoolTimeout\nfrom ..models import Origin, Request, Response\nfrom ..utils import get_logger\nfrom .base import Dispatcher\nfrom .connection import HTTPConnection\n\nCONNECTIONS_DICT = typing.Dict[Origin, typing.List[HTTPConnection]]\n\n\nlogger = get_logger(__name__)\n\n\nclass NullSemaphore(BaseSemaphore):\n async def acquire(self, timeout: float = None) -> None:\n return\n\n def release(self) -> None:\n return\n\n\nclass ConnectionStore:\n \"\"\"\n We need to maintain collections of connections in a way that allows us to:\n\n * Lookup connections by origin.\n * Iterate over connections by insertion time.\n * Return the total number of connections.\n \"\"\"\n\n def __init__(self) -> None:\n self.all: typing.Dict[HTTPConnection, float] = {}\n self.by_origin: typing.Dict[Origin, typing.Dict[HTTPConnection, float]] = {}\n\n def pop_by_origin(\n self, origin: Origin, http2_only: bool = False\n ) -> typing.Optional[HTTPConnection]:\n try:\n connections = self.by_origin[origin]\n except KeyError:\n return None\n\n connection = next(reversed(list(connections.keys())))\n if http2_only and not connection.is_http2:\n return None\n\n del connections[connection]\n if not connections:\n del self.by_origin[origin]\n del self.all[connection]\n\n return connection\n\n def add(self, connection: HTTPConnection) -> None:\n self.all[connection] = 0.0\n try:\n self.by_origin[connection.origin][connection] = 0.0\n except KeyError:\n self.by_origin[connection.origin] = {connection: 0.0}\n\n def remove(self, connection: HTTPConnection) -> None:\n del self.all[connection]\n del self.by_origin[connection.origin][connection]\n if not self.by_origin[connection.origin]:\n del self.by_origin[connection.origin]\n\n def clear(self) -> None:\n self.all.clear()\n self.by_origin.clear()\n\n def __iter__(self) -> typing.Iterator[HTTPConnection]:\n return iter(self.all.keys())\n\n def __len__(self) -> int:\n return len(self.all)\n\n\nclass ConnectionPool(Dispatcher):\n KEEP_ALIVE_EXPIRY = 5.0\n\n def __init__(\n self,\n *,\n verify: VerifyTypes = True,\n cert: CertTypes = None,\n trust_env: bool = None,\n pool_limits: PoolLimits = DEFAULT_POOL_LIMITS,\n http2: bool = False,\n backend: typing.Union[str, ConcurrencyBackend] = \"auto\",\n uds: typing.Optional[str] = None,\n ):\n self.ssl = SSLConfig(verify=verify, cert=cert, trust_env=trust_env, http2=http2)\n self.pool_limits = pool_limits\n self.is_closed = False\n self.uds = uds\n\n self.keepalive_connections = ConnectionStore()\n self.active_connections = ConnectionStore()\n\n self.backend = lookup_backend(backend)\n self.next_keepalive_check = 0.0\n\n @property\n def max_connections(self) -> BaseSemaphore:\n # We do this lazily, to make sure backend autodetection always\n # runs within an async context.\n if not hasattr(self, \"_max_connections\"):\n limit = self.pool_limits.hard_limit\n if limit:\n self._max_connections = self.backend.create_semaphore(\n limit, exc_class=PoolTimeout\n )\n else:\n self._max_connections = NullSemaphore()\n\n return self._max_connections\n\n @property\n def num_connections(self) -> int:\n return len(self.keepalive_connections) + len(self.active_connections)\n\n async def check_keepalive_expiry(self) -> None:\n now = self.backend.time()\n if now < self.next_keepalive_check:\n return\n 
self.next_keepalive_check = now + 1.0\n\n # Iterate through all the keep alive connections.\n # We create a list here to avoid any 'changed during iteration' errors.\n keepalives = list(self.keepalive_connections.all.keys())\n for connection in keepalives:\n if connection.expires_at is not None and now > connection.expires_at:\n self.keepalive_connections.remove(connection)\n self.max_connections.release()\n await connection.close()\n\n async def send(self, request: Request, timeout: Timeout = None) -> Response:\n await self.check_keepalive_expiry()\n connection = await self.acquire_connection(\n origin=request.url.origin, timeout=timeout\n )\n try:\n response = await connection.send(request, timeout=timeout)\n except BaseException as exc:\n self.active_connections.remove(connection)\n self.max_connections.release()\n raise exc\n\n return response\n\n async def acquire_connection(\n self, origin: Origin, timeout: Timeout = None\n ) -> HTTPConnection:\n logger.trace(f\"acquire_connection origin={origin!r}\")\n connection = self.pop_connection(origin)\n\n if connection is None:\n pool_timeout = None if timeout is None else timeout.pool_timeout\n\n await self.max_connections.acquire(timeout=pool_timeout)\n connection = HTTPConnection(\n origin,\n ssl=self.ssl,\n backend=self.backend,\n release_func=self.release_connection,\n uds=self.uds,\n )\n logger.trace(f\"new_connection connection={connection!r}\")\n else:\n logger.trace(f\"reuse_connection connection={connection!r}\")\n\n self.active_connections.add(connection)\n\n return connection\n\n async def release_connection(self, connection: HTTPConnection) -> None:\n logger.trace(f\"release_connection connection={connection!r}\")\n if connection.is_closed:\n self.active_connections.remove(connection)\n self.max_connections.release()\n elif (\n self.pool_limits.soft_limit is not None\n and self.num_connections > self.pool_limits.soft_limit\n ):\n self.active_connections.remove(connection)\n self.max_connections.release()\n await connection.close()\n else:\n now = self.backend.time()\n connection.expires_at = now + self.KEEP_ALIVE_EXPIRY\n self.active_connections.remove(connection)\n self.keepalive_connections.add(connection)\n\n async def close(self) -> None:\n self.is_closed = True\n connections = list(self.keepalive_connections)\n self.keepalive_connections.clear()\n for connection in connections:\n self.max_connections.release()\n await connection.close()\n\n def pop_connection(self, origin: Origin) -> typing.Optional[HTTPConnection]:\n connection = self.active_connections.pop_by_origin(origin, http2_only=True)\n if connection is None:\n connection = self.keepalive_connections.pop_by_origin(origin)\n\n if connection is not None and connection.is_connection_dropped():\n self.max_connections.release()\n connection = None\n\n return connection\n",
"path": "httpx/dispatch/connection_pool.py"
}
] | diff --git a/httpx/dispatch/connection_pool.py b/httpx/dispatch/connection_pool.py
index 545e204521..3e09b11017 100644
--- a/httpx/dispatch/connection_pool.py
+++ b/httpx/dispatch/connection_pool.py
@@ -206,6 +206,7 @@ async def close(self) -> None:
connections = list(self.keepalive_connections)
self.keepalive_connections.clear()
for connection in connections:
+ self.max_connections.release()
await connection.close()
def pop_connection(self, origin: Origin) -> typing.Optional[HTTPConnection]:
diff --git a/tests/dispatch/test_connection_pools.py b/tests/dispatch/test_connection_pools.py
index 5e1fce3e07..0ec12a7c6c 100644
--- a/tests/dispatch/test_connection_pools.py
+++ b/tests/dispatch/test_connection_pools.py
@@ -221,3 +221,28 @@ async def test_connection_closed_free_semaphore_on_acquire(server, restart):
response = await http.request("GET", server.url)
assert response.status_code == 200
+
+
+@pytest.mark.usefixtures("async_environment")
+async def test_connection_pool_closed_close_keepalive_and_free_semaphore(server):
+ """
+ Closing the connection pool should close remaining keepalive connections and
+ release the max_connections semaphore.
+ """
+ http = ConnectionPool(pool_limits=httpx.PoolLimits(hard_limit=1))
+
+ async with http:
+ response = await http.request("GET", server.url)
+ await response.aread()
+ assert response.status_code == 200
+ assert len(http.keepalive_connections) == 1
+
+ assert len(http.keepalive_connections) == 0
+
+ # Perform a second round of requests to make sure the max_connections semaphore
+ # was released properly.
+
+ async with http:
+ response = await http.request("GET", server.url)
+ await response.aread()
+ assert response.status_code == 200
| Keepalive connections aren't released when closing the ConnectionPool
Hello. I am having an issue where it looks like connections aren't being closed correctly: after I reach a number of requests equal to the "hard_limit" of pool_limits, I get a PoolTimeout exception.
I tried upgrading to httpx==0.10.1, with no success.
Minimal example:
```
import httpx, asyncio, logging
from httpx import PoolLimits
from random import randint

queue = asyncio.Queue()
clients = [
    httpx.AsyncClient(
        http2=True,
        pool_limits=PoolLimits(soft_limit=2, hard_limit=10),
        cookies={'a': '123456789', 'b': '987654321'},
    )
]

async def worker_loop(cid, client, queue):
    while 1:
        sub_id = await queue.get()
        async with client as c:
            r = await c.get(f'https://mywebsite.dummy/submission.php?id={sub_id}')
            if r.status_code != 200:
                print(cid, f'Got status code {r.status_code} while parsing {sub_id}')
                return

async def main():
    for i in range(2500):
        await queue.put(randint(1, 80000000))
    for k, v in enumerate(clients):
        asyncio.create_task(worker_loop(k, v, queue))
    while 1:
        if queue.qsize() == 0:
            await queue.put(randint(1, 80000000))
        await asyncio.sleep(2)

loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.stop()
```
I checked with netstat, and only one actual connection is opened to the IP address, so pooling seems to work fine.
I really cannot understand why. I even tried using the "aclose()" syntax, without the "async with" block, but it made no difference at all.
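The diff above addresses this by releasing the `max_connections` semaphore for every keepalive connection that `ConnectionPool.close()` closes, which is why acquisition stops timing out after `hard_limit` requests. Independently of that fix, the loop in the report re-enters `async with client` (and therefore closes the pool) on every iteration; below is a minimal sketch of the long-lived-client pattern instead, reusing the names and placeholder URL from the report above rather than any official example.

```
import asyncio

import httpx
from httpx import PoolLimits


async def worker_loop(cid, client, queue):
    # Keep one client open for the whole worker instead of re-entering
    # `async with client` (which closes the pool) for each request.
    while True:
        sub_id = await queue.get()
        r = await client.get(f'https://mywebsite.dummy/submission.php?id={sub_id}')
        if r.status_code != 200:
            print(cid, f'Got status code {r.status_code} while parsing {sub_id}')
            return


async def main(queue, n_workers=1):
    async with httpx.AsyncClient(
        http2=True, pool_limits=PoolLimits(soft_limit=2, hard_limit=10)
    ) as client:
        workers = [
            asyncio.create_task(worker_loop(k, client, queue))
            for k in range(n_workers)
        ]
        await asyncio.gather(*workers)
```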
|
wright-group__WrightTools-899 | [
{
"content": "\"\"\"Dataset base class.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport collections\n\nimport numpy as np\n\nimport h5py\n\nfrom . import exceptions as wt_exceptions\nfrom . import kit as wt_kit\nfrom . import units as wt_units\n\n\n# --- class ---------------------------------------------------------------------------------------\n\n\nclass Dataset(h5py.Dataset):\n \"\"\"Array-like data container.\"\"\"\n\n _instances = {}\n class_name = \"Dataset\"\n\n def __getitem__(self, index):\n if not hasattr(index, \"__iter__\"):\n index = [index]\n index = wt_kit.valid_index(index, self.shape)\n return super().__getitem__(index)\n\n def __iadd__(self, value):\n def f(dataset, s, value):\n if hasattr(value, \"shape\"):\n dataset[s] += value[wt_kit.valid_index(s, value.shape)]\n else:\n dataset[s] += value\n\n self.chunkwise(f, value=value)\n return self\n\n def __imul__(self, value):\n def f(dataset, s, value):\n if hasattr(value, \"shape\"):\n dataset[s] *= value[wt_kit.valid_index(s, value.shape)]\n else:\n dataset[s] *= value\n\n self.chunkwise(f, value=value)\n return self\n\n def __ipow__(self, value):\n def f(dataset, s, value):\n if hasattr(value, \"shape\"):\n dataset[s] **= value[wt_kit.valid_index(s, value.shape)]\n else:\n dataset[s] **= value\n\n self.chunkwise(f, value=value)\n return self\n\n def __isub__(self, value):\n def f(dataset, s, value):\n if hasattr(value, \"shape\"):\n dataset[s] -= value[wt_kit.valid_index(s, value.shape)]\n else:\n dataset[s] -= value\n\n self.chunkwise(f, value=value)\n return self\n\n def __itruediv__(self, value):\n def f(dataset, s, value):\n if hasattr(value, \"shape\"):\n dataset[s] /= value[wt_kit.valid_index(s, value.shape)]\n else:\n dataset[s] /= value\n\n self.chunkwise(f, value=value)\n return self\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def __new__(cls, parent, id, **kwargs):\n \"\"\"New object formation handler.\"\"\"\n fullpath = parent.fullpath + h5py.h5i.get_name(id).decode()\n fullpath = fullpath.replace(\"//\", \"/\")\n if fullpath in cls._instances.keys():\n return cls._instances[fullpath]\n else:\n instance = super(Dataset, cls).__new__(cls)\n cls.__init__(instance, parent, id, **kwargs)\n cls._instances[fullpath] = instance\n return instance\n\n def __repr__(self):\n return \"<WrightTools.{0} '{1}' at {2}>\".format(\n self.class_name, self.natural_name, self.fullpath\n )\n\n def __setitem__(self, index, value):\n self._clear_array_attributes_cache()\n return super().__setitem__(index, value)\n\n def _clear_array_attributes_cache(self):\n if \"max\" in self.attrs.keys():\n del self.attrs[\"max\"]\n if \"min\" in self.attrs.keys():\n del self.attrs[\"min\"]\n if \"argmax\" in self.attrs.keys():\n del self.attrs[\"argmax\"]\n if \"argmin\" in self.attrs.keys():\n del self.attrs[\"argmin\"]\n\n @property\n def _leaf(self):\n out = self.natural_name\n if self.units is not None:\n out += \" ({0})\".format(self.units)\n out += \" {0}\".format(self.shape)\n return out\n\n @property\n def full(self):\n arr = self[:]\n for i in range(arr.ndim):\n if arr.shape[i] == 1:\n arr = np.repeat(arr, self.parent.shape[i], axis=i)\n return arr\n\n @property\n def fullpath(self):\n \"\"\"Full path: file and internal structure.\"\"\"\n return self.parent.filepath + \"::\" + self.name\n\n @property\n def natural_name(self):\n \"\"\"Natural name of the dataset. 
May be different from name.\"\"\"\n try:\n assert self._natural_name is not None\n except (AssertionError, AttributeError):\n self._natural_name = self.attrs[\"name\"]\n finally:\n return self._natural_name\n\n @natural_name.setter\n def natural_name(self, value):\n self.attrs[\"name\"] = value\n self._natural_name = None\n\n @property\n def parent(self):\n \"\"\"Parent.\"\"\"\n return self._parent\n\n @property\n def points(self):\n \"\"\"Squeezed array.\"\"\"\n return np.squeeze(self[:])\n\n @property\n def units(self):\n \"\"\"Units.\"\"\"\n if \"units\" in self.attrs.keys():\n # This try-except here for compatibility with v1.0.0 of WT5 format\n try:\n self.attrs[\"units\"] = self.attrs[\"units\"].decode()\n except AttributeError:\n pass # already a string, not bytes\n return self.attrs[\"units\"]\n return None\n\n @units.setter\n def units(self, value):\n \"\"\"Set units.\"\"\"\n if value is None:\n if \"units\" in self.attrs.keys():\n self.attrs.pop(\"units\")\n else:\n try:\n self.attrs[\"units\"] = value\n except AttributeError:\n self.attrs[\"units\"] = value\n\n def argmax(self):\n \"\"\"Index of the maximum, ignorning nans.\"\"\"\n if \"argmax\" not in self.attrs.keys():\n\n def f(dataset, s):\n arr = dataset[s]\n try:\n amin = np.nanargmax(arr)\n except ValueError:\n amin = 0\n idx = np.unravel_index(amin, arr.shape)\n val = arr[idx]\n return (tuple(i + (ss.start if ss.start else 0) for i, ss in zip(idx, s)), val)\n\n chunk_res = self.chunkwise(f)\n idxs = [i[0] for i in chunk_res.values()]\n vals = [i[1] for i in chunk_res.values()]\n self.attrs[\"argmax\"] = idxs[np.nanargmax(vals)]\n return tuple(self.attrs[\"argmax\"])\n\n def argmin(self):\n \"\"\"Index of the minimum, ignoring nans.\"\"\"\n if \"argmin\" not in self.attrs.keys():\n\n def f(dataset, s):\n arr = dataset[s]\n try:\n amin = np.nanargmin(arr)\n except ValueError:\n amin = 0\n idx = np.unravel_index(amin, arr.shape)\n val = arr[idx]\n return (tuple(i + (ss.start if ss.start else 0) for i, ss in zip(idx, s)), val)\n\n chunk_res = self.chunkwise(f)\n idxs = [i[0] for i in chunk_res.values()]\n vals = [i[1] for i in chunk_res.values()]\n self.attrs[\"argmin\"] = idxs[np.nanargmin(vals)]\n return tuple(self.attrs[\"argmin\"])\n\n def chunkwise(self, func, *args, **kwargs):\n \"\"\"Execute a function for each chunk in the dataset.\n\n Order of excecution is not guaranteed.\n\n Parameters\n ----------\n func : function\n Function to execute. First two arguments must be dataset,\n slices.\n args (optional)\n Additional (unchanging) arguments passed to func.\n kwargs (optional)\n Additional (unchanging) keyword arguments passed to func.\n\n Returns\n -------\n collections OrderedDict\n Dictionary of index: function output. Index is to lowest corner\n of each chunk.\n \"\"\"\n out = collections.OrderedDict()\n for s in self.slices():\n key = tuple(sss.start for sss in s)\n out[key] = func(self, s, *args, **kwargs)\n self._clear_array_attributes_cache()\n return out\n\n def clip(self, min=None, max=None, replace=np.nan):\n \"\"\"Clip values outside of a defined range.\n\n Parameters\n ----------\n min : number (optional)\n New channel minimum. Default is None.\n max : number (optional)\n New channel maximum. Default is None.\n replace : number or 'value' (optional)\n Replace behavior. 
Default is nan.\n \"\"\"\n if max is None:\n max = self.max()\n if min is None:\n min = self.min()\n\n def f(dataset, s, min, max, replace):\n if hasattr(min, \"shape\"):\n min = min[wt_kit.valid_index(s, min.shape)]\n if hasattr(max, \"shape\"):\n max = max[wt_kit.valid_index(s, max.shape)]\n if hasattr(replace, \"shape\"):\n replace = replace[wt_kit.valid_index(s, replace.shape)]\n arr = dataset[s]\n if replace == \"value\":\n dataset[s] = np.clip(arr, min, max)\n else:\n arr[arr < min] = replace\n arr[arr > max] = replace\n dataset[s] = arr\n\n self.chunkwise(f, min=min, max=max, replace=replace)\n\n def convert(self, destination_units):\n \"\"\"Convert units.\n\n Parameters\n ----------\n destination_units : string (optional)\n Units to convert into.\n \"\"\"\n if not wt_units.is_valid_conversion(self.units, destination_units):\n kind = wt_units.kind(self.units)\n valid = list(wt_units.dicts[kind].keys())\n raise wt_exceptions.UnitsError(valid, destination_units)\n if self.units is None:\n return\n\n def f(dataset, s, destination_units):\n dataset[s] = wt_units.converter(dataset[s], dataset.units, destination_units)\n\n self.chunkwise(f, destination_units=destination_units)\n self.units = destination_units\n\n def log(self, base=np.e, floor=None):\n \"\"\"Take the log of the entire dataset.\n\n Parameters\n ----------\n base : number (optional)\n Base of log. Default is e.\n floor : number (optional)\n Clip values below floor after log. Default is None.\n \"\"\"\n\n def f(dataset, s, base, floor):\n arr = dataset[s]\n arr = np.log(arr)\n if base != np.e:\n arr /= np.log(base)\n if floor is not None:\n arr[arr < floor] = floor\n dataset[s] = arr\n\n self.chunkwise(f, base=base, floor=floor)\n\n def log10(self, floor=None):\n \"\"\"Take the log base 10 of the entire dataset.\n\n Parameters\n ----------\n floor : number (optional)\n Clip values below floor after log. Default is None.\n \"\"\"\n\n def f(dataset, s, floor):\n arr = dataset[s]\n arr = np.log10(arr)\n if floor is not None:\n arr[arr < floor] = floor\n dataset[s] = arr\n\n self.chunkwise(f, floor=floor)\n\n def log2(self, floor=None):\n \"\"\"Take the log base 2 of the entire dataset.\n\n Parameters\n ----------\n floor : number (optional)\n Clip values below floor after log. 
Default is None.\n \"\"\"\n\n def f(dataset, s, floor):\n arr = dataset[s]\n arr = np.log2(arr)\n if floor is not None:\n arr[arr < floor] = floor\n dataset[s] = arr\n\n self.chunkwise(f, floor=floor)\n\n def max(self):\n \"\"\"Maximum, ignorning nans.\"\"\"\n if \"max\" not in self.attrs.keys():\n\n def f(dataset, s):\n return np.nanmax(dataset[s])\n\n self.attrs[\"max\"] = np.nanmax(list(self.chunkwise(f).values()))\n return self.attrs[\"max\"]\n\n def min(self):\n \"\"\"Minimum, ignoring nans.\"\"\"\n if \"min\" not in self.attrs.keys():\n\n def f(dataset, s):\n return np.nanmin(dataset[s])\n\n self.attrs[\"min\"] = np.nanmin(list(self.chunkwise(f).values()))\n return self.attrs[\"min\"]\n\n def slices(self):\n \"\"\"Returns a generator yielding tuple of slice objects.\n\n Order is not guaranteed.\n \"\"\"\n if self.chunks is None:\n yield tuple(slice(None, s) for s in self.shape)\n else:\n ceilings = tuple(-(-s // c) for s, c in zip(self.shape, self.chunks))\n for idx in np.ndindex(ceilings): # could also use itertools.product\n out = []\n for i, c, s in zip(idx, self.chunks, self.shape):\n start = i * c\n stop = min(start + c, s + 1)\n out.append(slice(start, stop, 1))\n yield tuple(out)\n\n def symmetric_root(self, root=2):\n def f(dataset, s, root):\n dataset[s] = np.sign(dataset[s]) * (np.abs(dataset[s]) ** (1 / root))\n\n self.chunkwise(f, root=root)\n",
"path": "WrightTools/_dataset.py"
}
] | [
{
"content": "\"\"\"Dataset base class.\"\"\"\n\n\n# --- import --------------------------------------------------------------------------------------\n\n\nimport collections\n\nimport numpy as np\n\nimport h5py\n\nfrom . import exceptions as wt_exceptions\nfrom . import kit as wt_kit\nfrom . import units as wt_units\n\n\n# --- class ---------------------------------------------------------------------------------------\n\n\nclass Dataset(h5py.Dataset):\n \"\"\"Array-like data container.\"\"\"\n\n _instances = {}\n class_name = \"Dataset\"\n\n def __getitem__(self, index):\n if not hasattr(index, \"__iter__\"):\n index = [index]\n index = wt_kit.valid_index(index, self.shape)\n return super().__getitem__(index)\n\n def __iadd__(self, value):\n def f(dataset, s, value):\n if hasattr(value, \"shape\"):\n dataset[s] += value[wt_kit.valid_index(s, value.shape)]\n else:\n dataset[s] += value\n\n self.chunkwise(f, value=value)\n return self\n\n def __imul__(self, value):\n def f(dataset, s, value):\n if hasattr(value, \"shape\"):\n dataset[s] *= value[wt_kit.valid_index(s, value.shape)]\n else:\n dataset[s] *= value\n\n self.chunkwise(f, value=value)\n return self\n\n def __ipow__(self, value):\n def f(dataset, s, value):\n if hasattr(value, \"shape\"):\n dataset[s] **= value[wt_kit.valid_index(s, value.shape)]\n else:\n dataset[s] **= value\n\n self.chunkwise(f, value=value)\n return self\n\n def __isub__(self, value):\n def f(dataset, s, value):\n if hasattr(value, \"shape\"):\n dataset[s] -= value[wt_kit.valid_index(s, value.shape)]\n else:\n dataset[s] -= value\n\n self.chunkwise(f, value=value)\n return self\n\n def __itruediv__(self, value):\n def f(dataset, s, value):\n if hasattr(value, \"shape\"):\n dataset[s] /= value[wt_kit.valid_index(s, value.shape)]\n else:\n dataset[s] /= value\n\n self.chunkwise(f, value=value)\n return self\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def __new__(cls, parent, id, **kwargs):\n \"\"\"New object formation handler.\"\"\"\n fullpath = parent.fullpath + h5py.h5i.get_name(id).decode()\n fullpath = fullpath.replace(\"//\", \"/\")\n if fullpath in cls._instances.keys():\n return cls._instances[fullpath]\n else:\n instance = super(Dataset, cls).__new__(cls)\n cls.__init__(instance, parent, id, **kwargs)\n cls._instances[fullpath] = instance\n return instance\n\n def __repr__(self):\n return \"<WrightTools.{0} '{1}' at {2}>\".format(\n self.class_name, self.natural_name, self.fullpath\n )\n\n def __setitem__(self, index, value):\n self._clear_array_attributes_cache()\n return super().__setitem__(index, value)\n\n def _clear_array_attributes_cache(self):\n if \"max\" in self.attrs.keys():\n del self.attrs[\"max\"]\n if \"min\" in self.attrs.keys():\n del self.attrs[\"min\"]\n if \"argmax\" in self.attrs.keys():\n del self.attrs[\"argmax\"]\n if \"argmin\" in self.attrs.keys():\n del self.attrs[\"argmin\"]\n\n @property\n def _leaf(self):\n out = self.natural_name\n if self.size == 1:\n out += f\" = {self.points}\"\n if self.units is not None:\n out += \" ({0})\".format(self.units)\n if self.size != 1:\n out += \" {0}\".format(self.shape)\n return out\n\n @property\n def full(self):\n arr = self[:]\n for i in range(arr.ndim):\n if arr.shape[i] == 1:\n arr = np.repeat(arr, self.parent.shape[i], axis=i)\n return arr\n\n @property\n def fullpath(self):\n \"\"\"Full path: file and internal structure.\"\"\"\n return self.parent.filepath + \"::\" + self.name\n\n @property\n def natural_name(self):\n \"\"\"Natural name of the 
dataset. May be different from name.\"\"\"\n try:\n assert self._natural_name is not None\n except (AssertionError, AttributeError):\n self._natural_name = self.attrs[\"name\"]\n finally:\n return self._natural_name\n\n @natural_name.setter\n def natural_name(self, value):\n self.attrs[\"name\"] = value\n self._natural_name = None\n\n @property\n def parent(self):\n \"\"\"Parent.\"\"\"\n return self._parent\n\n @property\n def points(self):\n \"\"\"Squeezed array.\"\"\"\n return np.squeeze(self[:])\n\n @property\n def units(self):\n \"\"\"Units.\"\"\"\n if \"units\" in self.attrs.keys():\n # This try-except here for compatibility with v1.0.0 of WT5 format\n try:\n self.attrs[\"units\"] = self.attrs[\"units\"].decode()\n except AttributeError:\n pass # already a string, not bytes\n return self.attrs[\"units\"]\n return None\n\n @units.setter\n def units(self, value):\n \"\"\"Set units.\"\"\"\n if value is None:\n if \"units\" in self.attrs.keys():\n self.attrs.pop(\"units\")\n else:\n try:\n self.attrs[\"units\"] = value\n except AttributeError:\n self.attrs[\"units\"] = value\n\n def argmax(self):\n \"\"\"Index of the maximum, ignorning nans.\"\"\"\n if \"argmax\" not in self.attrs.keys():\n\n def f(dataset, s):\n arr = dataset[s]\n try:\n amin = np.nanargmax(arr)\n except ValueError:\n amin = 0\n idx = np.unravel_index(amin, arr.shape)\n val = arr[idx]\n return (tuple(i + (ss.start if ss.start else 0) for i, ss in zip(idx, s)), val)\n\n chunk_res = self.chunkwise(f)\n idxs = [i[0] for i in chunk_res.values()]\n vals = [i[1] for i in chunk_res.values()]\n self.attrs[\"argmax\"] = idxs[np.nanargmax(vals)]\n return tuple(self.attrs[\"argmax\"])\n\n def argmin(self):\n \"\"\"Index of the minimum, ignoring nans.\"\"\"\n if \"argmin\" not in self.attrs.keys():\n\n def f(dataset, s):\n arr = dataset[s]\n try:\n amin = np.nanargmin(arr)\n except ValueError:\n amin = 0\n idx = np.unravel_index(amin, arr.shape)\n val = arr[idx]\n return (tuple(i + (ss.start if ss.start else 0) for i, ss in zip(idx, s)), val)\n\n chunk_res = self.chunkwise(f)\n idxs = [i[0] for i in chunk_res.values()]\n vals = [i[1] for i in chunk_res.values()]\n self.attrs[\"argmin\"] = idxs[np.nanargmin(vals)]\n return tuple(self.attrs[\"argmin\"])\n\n def chunkwise(self, func, *args, **kwargs):\n \"\"\"Execute a function for each chunk in the dataset.\n\n Order of excecution is not guaranteed.\n\n Parameters\n ----------\n func : function\n Function to execute. First two arguments must be dataset,\n slices.\n args (optional)\n Additional (unchanging) arguments passed to func.\n kwargs (optional)\n Additional (unchanging) keyword arguments passed to func.\n\n Returns\n -------\n collections OrderedDict\n Dictionary of index: function output. Index is to lowest corner\n of each chunk.\n \"\"\"\n out = collections.OrderedDict()\n for s in self.slices():\n key = tuple(sss.start for sss in s)\n out[key] = func(self, s, *args, **kwargs)\n self._clear_array_attributes_cache()\n return out\n\n def clip(self, min=None, max=None, replace=np.nan):\n \"\"\"Clip values outside of a defined range.\n\n Parameters\n ----------\n min : number (optional)\n New channel minimum. Default is None.\n max : number (optional)\n New channel maximum. Default is None.\n replace : number or 'value' (optional)\n Replace behavior. 
Default is nan.\n \"\"\"\n if max is None:\n max = self.max()\n if min is None:\n min = self.min()\n\n def f(dataset, s, min, max, replace):\n if hasattr(min, \"shape\"):\n min = min[wt_kit.valid_index(s, min.shape)]\n if hasattr(max, \"shape\"):\n max = max[wt_kit.valid_index(s, max.shape)]\n if hasattr(replace, \"shape\"):\n replace = replace[wt_kit.valid_index(s, replace.shape)]\n arr = dataset[s]\n if replace == \"value\":\n dataset[s] = np.clip(arr, min, max)\n else:\n arr[arr < min] = replace\n arr[arr > max] = replace\n dataset[s] = arr\n\n self.chunkwise(f, min=min, max=max, replace=replace)\n\n def convert(self, destination_units):\n \"\"\"Convert units.\n\n Parameters\n ----------\n destination_units : string (optional)\n Units to convert into.\n \"\"\"\n if not wt_units.is_valid_conversion(self.units, destination_units):\n kind = wt_units.kind(self.units)\n valid = list(wt_units.dicts[kind].keys())\n raise wt_exceptions.UnitsError(valid, destination_units)\n if self.units is None:\n return\n\n def f(dataset, s, destination_units):\n dataset[s] = wt_units.converter(dataset[s], dataset.units, destination_units)\n\n self.chunkwise(f, destination_units=destination_units)\n self.units = destination_units\n\n def log(self, base=np.e, floor=None):\n \"\"\"Take the log of the entire dataset.\n\n Parameters\n ----------\n base : number (optional)\n Base of log. Default is e.\n floor : number (optional)\n Clip values below floor after log. Default is None.\n \"\"\"\n\n def f(dataset, s, base, floor):\n arr = dataset[s]\n arr = np.log(arr)\n if base != np.e:\n arr /= np.log(base)\n if floor is not None:\n arr[arr < floor] = floor\n dataset[s] = arr\n\n self.chunkwise(f, base=base, floor=floor)\n\n def log10(self, floor=None):\n \"\"\"Take the log base 10 of the entire dataset.\n\n Parameters\n ----------\n floor : number (optional)\n Clip values below floor after log. Default is None.\n \"\"\"\n\n def f(dataset, s, floor):\n arr = dataset[s]\n arr = np.log10(arr)\n if floor is not None:\n arr[arr < floor] = floor\n dataset[s] = arr\n\n self.chunkwise(f, floor=floor)\n\n def log2(self, floor=None):\n \"\"\"Take the log base 2 of the entire dataset.\n\n Parameters\n ----------\n floor : number (optional)\n Clip values below floor after log. 
Default is None.\n \"\"\"\n\n def f(dataset, s, floor):\n arr = dataset[s]\n arr = np.log2(arr)\n if floor is not None:\n arr[arr < floor] = floor\n dataset[s] = arr\n\n self.chunkwise(f, floor=floor)\n\n def max(self):\n \"\"\"Maximum, ignorning nans.\"\"\"\n if \"max\" not in self.attrs.keys():\n\n def f(dataset, s):\n return np.nanmax(dataset[s])\n\n self.attrs[\"max\"] = np.nanmax(list(self.chunkwise(f).values()))\n return self.attrs[\"max\"]\n\n def min(self):\n \"\"\"Minimum, ignoring nans.\"\"\"\n if \"min\" not in self.attrs.keys():\n\n def f(dataset, s):\n return np.nanmin(dataset[s])\n\n self.attrs[\"min\"] = np.nanmin(list(self.chunkwise(f).values()))\n return self.attrs[\"min\"]\n\n def slices(self):\n \"\"\"Returns a generator yielding tuple of slice objects.\n\n Order is not guaranteed.\n \"\"\"\n if self.chunks is None:\n yield tuple(slice(None, s) for s in self.shape)\n else:\n ceilings = tuple(-(-s // c) for s, c in zip(self.shape, self.chunks))\n for idx in np.ndindex(ceilings): # could also use itertools.product\n out = []\n for i, c, s in zip(idx, self.chunks, self.shape):\n start = i * c\n stop = min(start + c, s + 1)\n out.append(slice(start, stop, 1))\n yield tuple(out)\n\n def symmetric_root(self, root=2):\n def f(dataset, s, root):\n dataset[s] = np.sign(dataset[s]) * (np.abs(dataset[s]) ** (1 / root))\n\n self.chunkwise(f, root=root)\n",
"path": "WrightTools/_dataset.py"
}
] | diff --git a/WrightTools/_dataset.py b/WrightTools/_dataset.py
index 84b00379e..a898f1b2a 100644
--- a/WrightTools/_dataset.py
+++ b/WrightTools/_dataset.py
@@ -117,9 +117,12 @@ def _clear_array_attributes_cache(self):
@property
def _leaf(self):
out = self.natural_name
+ if self.size == 1:
+ out += f" = {self.points}"
if self.units is not None:
out += " ({0})".format(self.units)
- out += " {0}".format(self.shape)
+ if self.size != 1:
+ out += " {0}".format(self.shape)
return out
@property
| print_tree should print value and units for variables with size 1
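The accompanying patch only changes how a leaf is labelled in the tree: a dataset with exactly one element now shows its value, while larger datasets keep showing their shape. A standalone sketch of that formatting rule follows (it is not the actual `Dataset._leaf` property, and the sample names and numbers are invented):

```
def leaf_label(name, points, size, shape, units=None):
    # Mirrors the patched Dataset._leaf logic in plain-function form.
    out = name
    if size == 1:
        out += f" = {points}"
    if units is not None:
        out += f" ({units})"
    if size != 1:
        out += f" {shape}"
    return out


print(leaf_label("w1", 1600.0, 1, (1, 1, 1), "nm"))      # w1 = 1600.0 (nm)
print(leaf_label("signal", None, 2048, (256, 8), "V"))   # signal (V) (256, 8)
```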
|
pulp__pulpcore-4156 | [
{
"content": "from gettext import gettext as _\n\nimport asyncio\nfrom collections import namedtuple\nimport logging\nimport os\nimport tempfile\nfrom urllib.parse import urlsplit\n\nfrom pulpcore.app import pulp_hashlib\nfrom pulpcore.app.models import Artifact\nfrom pulpcore.exceptions import (\n DigestValidationError,\n SizeValidationError,\n TimeoutException,\n UnsupportedDigestValidationError,\n)\n\n\nlog = logging.getLogger(__name__)\n\n\nDownloadResult = namedtuple(\"DownloadResult\", [\"url\", \"artifact_attributes\", \"path\", \"headers\"])\n\"\"\"\nArgs:\n url (str): The url corresponding with the download.\n path (str): The absolute path to the saved file\n artifact_attributes (dict): Contains keys corresponding with\n :class:`~pulpcore.plugin.models.Artifact` fields. This includes the computed digest values\n along with size information.\n headers (aiohttp.multidict.MultiDict): HTTP response headers. The keys are header names. The\n values are header content. None when not using the HttpDownloader or sublclass.\n\"\"\"\n\n\nclass BaseDownloader:\n \"\"\"\n The base class of all downloaders, providing digest calculation, validation, and file handling.\n\n This is an abstract class and is meant to be subclassed. Subclasses are required to implement\n the :meth:`~pulpcore.plugin.download.BaseDownloader.run` method and do two things:\n\n 1. Pass all downloaded data to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` and schedule it.\n\n 2. Schedule :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` after all data has\n been delivered to :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n Passing all downloaded data the into\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` allows the file digests to\n be computed while data is written to disk. The digests computed are required if the download is\n to be saved as an :class:`~pulpcore.plugin.models.Artifact` which avoids having to re-read the\n data later.\n\n The :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` method by default\n writes to a random file in the current working directory.\n\n The call to :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` ensures that all\n data written to the file-like object is quiesced to disk before the file-like object has\n `close()` called on it.\n\n Attributes:\n url (str): The url to download.\n expected_digests (dict): Keyed on the algorithm name provided by hashlib and stores the\n value of the expected digest. e.g. {'md5': '912ec803b2ce49e4a541068d495ab570'}\n expected_size (int): The number of bytes the download is expected to have.\n path (str): The full path to the file containing the downloaded data.\n \"\"\"\n\n def __init__(\n self,\n url,\n expected_digests=None,\n expected_size=None,\n semaphore=None,\n *args,\n **kwargs,\n ):\n \"\"\"\n Create a BaseDownloader object. This is expected to be called by all subclasses.\n\n Args:\n url (str): The url to download.\n expected_digests (dict): Keyed on the algorithm name provided by hashlib and stores the\n value of the expected digest. e.g. 
{'md5': '912ec803b2ce49e4a541068d495ab570'}\n expected_size (int): The number of bytes the download is expected to have.\n semaphore (asyncio.Semaphore): A semaphore the downloader must acquire before running.\n Useful for limiting the number of outstanding downloaders in various ways.\n \"\"\"\n\n self.url = url\n self._writer = None\n self.path = None\n self.expected_digests = expected_digests\n self.expected_size = expected_size\n if semaphore:\n self.semaphore = semaphore\n else:\n self.semaphore = asyncio.Semaphore() # This will always be acquired\n self._digests = {}\n self._size = 0\n if self.expected_digests:\n if not set(self.expected_digests).intersection(set(Artifact.DIGEST_FIELDS)):\n raise UnsupportedDigestValidationError(\n _(\n \"Content at the URL '{}' does not contain at least one trusted hasher which\"\n \" is specified in the 'ALLOWED_CONTENT_CHECKSUMS' setting ({}). The\"\n \" downloader expected one of the following hashers: {}\"\n ).format(self.url, Artifact.DIGEST_FIELDS, set(self.expected_digests))\n )\n\n def _ensure_writer_has_open_file(self):\n \"\"\"\n Create a temporary file on demand.\n\n Create a temporary file when it's actually used,\n allowing plugin writers to instantiate many downloaders in memory.\n \"\"\"\n if not self._writer:\n filename = urlsplit(self.url).path.split(\"/\")[-1]\n # linux allows any character except NUL or / in a filename and has a length limit of\n # 255. Making it urlencoding-aware would be nice, but not critical, because urlencoded\n # paths should be OK\n is_legal_filename = filename and (len(filename) <= 243) # 255 - prefix length\n # if the filename isn't legal then we just fall back to no suffix (random name)\n suffix = \"-\" + filename if is_legal_filename else None\n # write the file to the current working directory with a random prefix and the\n # desired suffix. we always want the random prefix as it is possible to download\n # the same filename from two different URLs, and the files may not be the same.\n self._writer = tempfile.NamedTemporaryFile(dir=\".\", suffix=suffix, delete=False)\n self.path = self._writer.name\n self._digests = {n: pulp_hashlib.new(n) for n in Artifact.DIGEST_FIELDS}\n self._size = 0\n\n async def handle_data(self, data):\n \"\"\"\n A coroutine that writes data to the file object and compute its digests.\n\n All subclassed downloaders are expected to pass all data downloaded to this method. 
Similar\n to the hashlib docstring, repeated calls are equivalent to a single call with\n the concatenation of all the arguments: m.handle_data(a); m.handle_data(b) is equivalent to\n m.handle_data(a+b).\n\n Args:\n data (bytes): The data to be handled by the downloader.\n \"\"\"\n self._ensure_writer_has_open_file()\n self._writer.write(data)\n self._record_size_and_digests_for_data(data)\n\n async def finalize(self):\n \"\"\"\n A coroutine to flush downloaded data, close the file writer, and validate the data.\n\n All subclasses are required to call this method after all data has been passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n Raises:\n :class:`~pulpcore.exceptions.DigestValidationError`: When any of the ``expected_digest``\n values don't match the digest of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n :class:`~pulpcore.exceptions.SizeValidationError`: When the ``expected_size`` value\n doesn't match the size of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n \"\"\"\n self._ensure_writer_has_open_file()\n self._writer.flush()\n os.fsync(self._writer.fileno())\n self._writer.close()\n self._writer = None\n self.validate_digests()\n self.validate_size()\n log.debug(f\"Downloaded file from {self.url}\")\n\n def fetch(self):\n \"\"\"\n Run the download synchronously and return the `DownloadResult`.\n\n Returns:\n :class:`~pulpcore.plugin.download.DownloadResult`\n\n Raises:\n Exception: Any fatal exception emitted during downloading\n \"\"\"\n done, _ = asyncio.get_event_loop().run_until_complete(asyncio.wait([self.run()]))\n return done.pop().result()\n\n def _record_size_and_digests_for_data(self, data):\n \"\"\"\n Record the size and digest for an available chunk of data.\n\n Args:\n data (bytes): The data to have its size and digest values recorded.\n \"\"\"\n for algorithm in self._digests.values():\n algorithm.update(data)\n self._size += len(data)\n\n @property\n def artifact_attributes(self):\n \"\"\"\n A property that returns a dictionary with size and digest information. 
The keys of this\n dictionary correspond with :class:`~pulpcore.plugin.models.Artifact` fields.\n \"\"\"\n attributes = {\"size\": self._size}\n for algorithm in self._digests:\n attributes[algorithm] = self._digests[algorithm].hexdigest()\n return attributes\n\n def validate_digests(self):\n \"\"\"\n Validate all digests validate if ``expected_digests`` is set\n\n Raises:\n :class:`~pulpcore.exceptions.DigestValidationError`: When any of the ``expected_digest``\n values don't match the digest of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n \"\"\"\n if self.expected_digests:\n for algorithm, expected_digest in self.expected_digests.items():\n actual_digest = self._digests[algorithm].hexdigest()\n if actual_digest != expected_digest:\n raise DigestValidationError(actual_digest, expected_digest, url=self.url)\n\n def validate_size(self):\n \"\"\"\n Validate the size if ``expected_size`` is set\n\n Raises:\n :class:`~pulpcore.exceptions.SizeValidationError`: When the ``expected_size`` value\n doesn't match the size of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n \"\"\"\n if self.expected_size:\n actual_size = self._size\n expected_size = self.expected_size\n if actual_size != expected_size:\n raise SizeValidationError(actual_size, expected_size, url=self.url)\n\n async def run(self, extra_data=None):\n \"\"\"\n Run the downloader with concurrency restriction.\n\n This method acquires `self.semaphore` before calling the actual download implementation\n contained in `_run()`. This ensures that the semaphore stays acquired even as the `backoff`\n decorator on `_run()`, handles backoff-and-retry logic.\n\n Args:\n extra_data (dict): Extra data passed to the downloader.\n\n Returns:\n :class:`~pulpcore.plugin.download.DownloadResult` from `_run()`.\n\n \"\"\"\n async with self.semaphore:\n try:\n return await self._run(extra_data=extra_data)\n except asyncio.TimeoutError:\n raise TimeoutException(self.url)\n\n async def _run(self, extra_data=None):\n \"\"\"\n Run the downloader.\n\n This is a coroutine that asyncio can schedule to complete downloading. Subclasses are\n required to implement this method and do two things:\n\n 1. Pass all downloaded data to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n 2. Call :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` after all data has\n been delivered to :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n It is also expected that the subclass implementation return a\n :class:`~pulpcore.plugin.download.DownloadResult` object. The\n ``artifact_attributes`` value of the\n :class:`~pulpcore.plugin.download.DownloadResult` is usually set to the\n :attr:`~pulpcore.plugin.download.BaseDownloader.artifact_attributes` property value.\n\n This method is called from :meth:`~pulpcore.plugin.download.BaseDownloader.run` which\n handles concurrency restriction. Thus, by the time this method is called, the download can\n occur without violating the concurrency restriction.\n\n Args:\n extra_data (dict): Extra data passed to the downloader.\n\n Returns:\n :class:`~pulpcore.plugin.download.DownloadResult`\n\n Raises:\n Validation errors could be emitted when subclassed implementations call\n :meth:`~pulpcore.plugin.download.BaseDownloader.finalize`.\n \"\"\"\n raise NotImplementedError(\"Subclasses must define a _run() method that returns a coroutine\")\n",
"path": "pulpcore/download/base.py"
}
] | [
{
"content": "from gettext import gettext as _\n\nimport asyncio\nfrom collections import namedtuple\nimport logging\nimport os\nimport tempfile\nfrom urllib.parse import urlsplit\n\nfrom pulpcore.app import pulp_hashlib\nfrom pulpcore.app.models import Artifact\nfrom pulpcore.exceptions import (\n DigestValidationError,\n SizeValidationError,\n TimeoutException,\n UnsupportedDigestValidationError,\n)\n\n\nlog = logging.getLogger(__name__)\n\n\nDownloadResult = namedtuple(\"DownloadResult\", [\"url\", \"artifact_attributes\", \"path\", \"headers\"])\n\"\"\"\nArgs:\n url (str): The url corresponding with the download.\n path (str): The absolute path to the saved file\n artifact_attributes (dict): Contains keys corresponding with\n :class:`~pulpcore.plugin.models.Artifact` fields. This includes the computed digest values\n along with size information.\n headers (aiohttp.multidict.MultiDict): HTTP response headers. The keys are header names. The\n values are header content. None when not using the HttpDownloader or sublclass.\n\"\"\"\n\n\nclass BaseDownloader:\n \"\"\"\n The base class of all downloaders, providing digest calculation, validation, and file handling.\n\n This is an abstract class and is meant to be subclassed. Subclasses are required to implement\n the :meth:`~pulpcore.plugin.download.BaseDownloader.run` method and do two things:\n\n 1. Pass all downloaded data to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` and schedule it.\n\n 2. Schedule :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` after all data has\n been delivered to :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n Passing all downloaded data the into\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` allows the file digests to\n be computed while data is written to disk. The digests computed are required if the download is\n to be saved as an :class:`~pulpcore.plugin.models.Artifact` which avoids having to re-read the\n data later.\n\n The :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data` method by default\n writes to a random file in the current working directory.\n\n The call to :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` ensures that all\n data written to the file-like object is quiesced to disk before the file-like object has\n `close()` called on it.\n\n Attributes:\n url (str): The url to download.\n expected_digests (dict): Keyed on the algorithm name provided by hashlib and stores the\n value of the expected digest. e.g. {'md5': '912ec803b2ce49e4a541068d495ab570'}\n expected_size (int): The number of bytes the download is expected to have.\n path (str): The full path to the file containing the downloaded data.\n \"\"\"\n\n def __init__(\n self,\n url,\n expected_digests=None,\n expected_size=None,\n semaphore=None,\n *args,\n **kwargs,\n ):\n \"\"\"\n Create a BaseDownloader object. This is expected to be called by all subclasses.\n\n Args:\n url (str): The url to download.\n expected_digests (dict): Keyed on the algorithm name provided by hashlib and stores the\n value of the expected digest. e.g. 
{'md5': '912ec803b2ce49e4a541068d495ab570'}\n expected_size (int): The number of bytes the download is expected to have.\n semaphore (asyncio.Semaphore): A semaphore the downloader must acquire before running.\n Useful for limiting the number of outstanding downloaders in various ways.\n \"\"\"\n\n self.url = url\n self._writer = None\n self.path = None\n self.expected_digests = expected_digests\n self.expected_size = expected_size\n if semaphore:\n self.semaphore = semaphore\n else:\n self.semaphore = asyncio.Semaphore() # This will always be acquired\n self._digests = {}\n self._size = 0\n if self.expected_digests:\n if not set(self.expected_digests).intersection(set(Artifact.DIGEST_FIELDS)):\n raise UnsupportedDigestValidationError(\n _(\n \"Content at the URL '{}' does not contain at least one trusted hasher which\"\n \" is specified in the 'ALLOWED_CONTENT_CHECKSUMS' setting ({}). The\"\n \" downloader expected one of the following hashers: {}\"\n ).format(self.url, Artifact.DIGEST_FIELDS, set(self.expected_digests))\n )\n\n def _ensure_writer_has_open_file(self):\n \"\"\"\n Create a temporary file on demand.\n\n Create a temporary file when it's actually used,\n allowing plugin writers to instantiate many downloaders in memory.\n \"\"\"\n if not self._writer:\n filename = urlsplit(self.url).path.split(\"/\")[-1]\n # linux allows any character except NUL or / in a filename and has a length limit of\n # 255. Making it urlencoding-aware would be nice, but not critical, because urlencoded\n # paths should be OK\n is_legal_filename = filename and (len(filename) <= 243) # 255 - prefix length\n # if the filename isn't legal then we just fall back to no suffix (random name)\n suffix = \"-\" + filename if is_legal_filename else None\n # write the file to the current working directory with a random prefix and the\n # desired suffix. we always want the random prefix as it is possible to download\n # the same filename from two different URLs, and the files may not be the same.\n self._writer = tempfile.NamedTemporaryFile(dir=\".\", suffix=suffix, delete=False)\n self.path = self._writer.name\n self._digests = {n: pulp_hashlib.new(n) for n in Artifact.DIGEST_FIELDS}\n self._size = 0\n\n async def handle_data(self, data):\n \"\"\"\n A coroutine that writes data to the file object and compute its digests.\n\n All subclassed downloaders are expected to pass all data downloaded to this method. 
Similar\n to the hashlib docstring, repeated calls are equivalent to a single call with\n the concatenation of all the arguments: m.handle_data(a); m.handle_data(b) is equivalent to\n m.handle_data(a+b).\n\n Args:\n data (bytes): The data to be handled by the downloader.\n \"\"\"\n self._ensure_writer_has_open_file()\n self._writer.write(data)\n self._record_size_and_digests_for_data(data)\n\n async def finalize(self):\n \"\"\"\n A coroutine to flush downloaded data, close the file writer, and validate the data.\n\n All subclasses are required to call this method after all data has been passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n Raises:\n :class:`~pulpcore.exceptions.DigestValidationError`: When any of the ``expected_digest``\n values don't match the digest of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n :class:`~pulpcore.exceptions.SizeValidationError`: When the ``expected_size`` value\n doesn't match the size of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n \"\"\"\n self._ensure_writer_has_open_file()\n self._writer.flush()\n os.fsync(self._writer.fileno())\n self._writer.close()\n self._writer = None\n self.validate_digests()\n self.validate_size()\n log.debug(f\"Downloaded file from {self.url}\")\n\n def fetch(self):\n \"\"\"\n Run the download synchronously and return the `DownloadResult`.\n\n Returns:\n :class:`~pulpcore.plugin.download.DownloadResult`\n\n Raises:\n Exception: Any fatal exception emitted during downloading\n \"\"\"\n result = asyncio.get_event_loop().run_until_complete(self.run())\n return result\n\n def _record_size_and_digests_for_data(self, data):\n \"\"\"\n Record the size and digest for an available chunk of data.\n\n Args:\n data (bytes): The data to have its size and digest values recorded.\n \"\"\"\n for algorithm in self._digests.values():\n algorithm.update(data)\n self._size += len(data)\n\n @property\n def artifact_attributes(self):\n \"\"\"\n A property that returns a dictionary with size and digest information. 
The keys of this\n dictionary correspond with :class:`~pulpcore.plugin.models.Artifact` fields.\n \"\"\"\n attributes = {\"size\": self._size}\n for algorithm in self._digests:\n attributes[algorithm] = self._digests[algorithm].hexdigest()\n return attributes\n\n def validate_digests(self):\n \"\"\"\n Validate all digests validate if ``expected_digests`` is set\n\n Raises:\n :class:`~pulpcore.exceptions.DigestValidationError`: When any of the ``expected_digest``\n values don't match the digest of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n \"\"\"\n if self.expected_digests:\n for algorithm, expected_digest in self.expected_digests.items():\n actual_digest = self._digests[algorithm].hexdigest()\n if actual_digest != expected_digest:\n raise DigestValidationError(actual_digest, expected_digest, url=self.url)\n\n def validate_size(self):\n \"\"\"\n Validate the size if ``expected_size`` is set\n\n Raises:\n :class:`~pulpcore.exceptions.SizeValidationError`: When the ``expected_size`` value\n doesn't match the size of the data passed to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n \"\"\"\n if self.expected_size:\n actual_size = self._size\n expected_size = self.expected_size\n if actual_size != expected_size:\n raise SizeValidationError(actual_size, expected_size, url=self.url)\n\n async def run(self, extra_data=None):\n \"\"\"\n Run the downloader with concurrency restriction.\n\n This method acquires `self.semaphore` before calling the actual download implementation\n contained in `_run()`. This ensures that the semaphore stays acquired even as the `backoff`\n decorator on `_run()`, handles backoff-and-retry logic.\n\n Args:\n extra_data (dict): Extra data passed to the downloader.\n\n Returns:\n :class:`~pulpcore.plugin.download.DownloadResult` from `_run()`.\n\n \"\"\"\n async with self.semaphore:\n try:\n return await self._run(extra_data=extra_data)\n except asyncio.TimeoutError:\n raise TimeoutException(self.url)\n\n async def _run(self, extra_data=None):\n \"\"\"\n Run the downloader.\n\n This is a coroutine that asyncio can schedule to complete downloading. Subclasses are\n required to implement this method and do two things:\n\n 1. Pass all downloaded data to\n :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n 2. Call :meth:`~pulpcore.plugin.download.BaseDownloader.finalize` after all data has\n been delivered to :meth:`~pulpcore.plugin.download.BaseDownloader.handle_data`.\n\n It is also expected that the subclass implementation return a\n :class:`~pulpcore.plugin.download.DownloadResult` object. The\n ``artifact_attributes`` value of the\n :class:`~pulpcore.plugin.download.DownloadResult` is usually set to the\n :attr:`~pulpcore.plugin.download.BaseDownloader.artifact_attributes` property value.\n\n This method is called from :meth:`~pulpcore.plugin.download.BaseDownloader.run` which\n handles concurrency restriction. Thus, by the time this method is called, the download can\n occur without violating the concurrency restriction.\n\n Args:\n extra_data (dict): Extra data passed to the downloader.\n\n Returns:\n :class:`~pulpcore.plugin.download.DownloadResult`\n\n Raises:\n Validation errors could be emitted when subclassed implementations call\n :meth:`~pulpcore.plugin.download.BaseDownloader.finalize`.\n \"\"\"\n raise NotImplementedError(\"Subclasses must define a _run() method that returns a coroutine\")\n",
"path": "pulpcore/download/base.py"
}
] | diff --git a/CHANGES/4107.bugfix b/CHANGES/4107.bugfix
new file mode 100644
index 0000000000..73fb394cc7
--- /dev/null
+++ b/CHANGES/4107.bugfix
@@ -0,0 +1 @@
+Updated the downloader's fetch method to comply with Python 3.11.
diff --git a/pulpcore/download/base.py b/pulpcore/download/base.py
index 59223deac3..7bbaa4f4c0 100644
--- a/pulpcore/download/base.py
+++ b/pulpcore/download/base.py
@@ -182,8 +182,8 @@ def fetch(self):
Raises:
Exception: Any fatal exception emitted during downloading
"""
- done, _ = asyncio.get_event_loop().run_until_complete(asyncio.wait([self.run()]))
- return done.pop().result()
+ result = asyncio.get_event_loop().run_until_complete(self.run())
+ return result
def _record_size_and_digests_for_data(self, data):
"""
 | BaseDownloader.fetch passes a coroutine to asyncio.wait, which is forbidden in Python 3.11
Python 3.8 deprecated passing coroutines to `asyncio.wait`, and Python 3.11 now [raises an error](https://github.com/python/cpython/blob/a6313d78f21f79ca64dedd38e637509dc530a1b6/Lib/asyncio/tasks.py#L414C13-L414C13). This causes the `BaseDownloader.fetch` call to fail on Python 3.11: https://github.com/pulp/pulpcore/blob/9dbcc8810f97f53297a933df2e1b74cdc324a8ea/pulpcore/download/base.py#L185.
Python provides the solution in the error message: "Passing coroutines is forbidden, use tasks explicitly."
I believe this can be fixed by explicitly converting the coroutine to a task using asyncio's `create_task`.
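For illustration, here is a minimal sketch of both options; it is not pulpcore code, and `downloader` is just a placeholder for a `BaseDownloader` instance:
```python
import asyncio


def fetch_via_task(downloader):
    # Option suggested in this issue: wrap the coroutine in a Task first, so that
    # asyncio.wait() receives a Task (still accepted on Python 3.11) instead of a
    # bare coroutine.
    loop = asyncio.get_event_loop()
    task = loop.create_task(downloader.run())
    done, _ = loop.run_until_complete(asyncio.wait([task]))
    return done.pop().result()


def fetch_directly(downloader):
    # Option taken by the fix shown in the diff above: drop asyncio.wait() entirely
    # and run the single coroutine to completion; exceptions propagate directly
    # instead of being wrapped in a Task result.
    return asyncio.get_event_loop().run_until_complete(downloader.run())
```
Either form avoids handing a raw coroutine to `asyncio.wait`, which is the call Python 3.11 rejects.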
|
ludwig-ai__ludwig-1702 | [
{
"content": "# Copyright (c) 2019 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport copy\nimport logging\nfrom abc import ABC, abstractmethod, abstractstaticmethod\nfrom typing import Any, Dict, Optional\n\nimport torch\nfrom torch import Tensor\n\nfrom ludwig.constants import COLUMN, HIDDEN, LENGTHS, LOGITS, LOSS, NAME, PREDICTIONS, PROBABILITIES, PROC_COLUMN, TYPE\nfrom ludwig.decoders.registry import get_decoder_cls\nfrom ludwig.encoders.registry import get_encoder_cls\nfrom ludwig.features.feature_utils import compute_feature_hash, get_input_size_with_dependencies\nfrom ludwig.modules.fully_connected_modules import FCStack\nfrom ludwig.modules.loss_modules import get_loss_cls\nfrom ludwig.modules.metric_registry import get_metric_classes, get_metric_cls\nfrom ludwig.modules.reduction_modules import SequenceReducer\nfrom ludwig.utils import output_feature_utils\nfrom ludwig.utils.metric_utils import get_scalar_from_ludwig_metric\nfrom ludwig.utils.misc_utils import merge_dict\nfrom ludwig.utils.torch_utils import LudwigModule\nfrom ludwig.utils.types import DataFrame\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseFeatureMixin(ABC):\n \"\"\"Parent class for feature mixins.\n\n Feature mixins support preprocessing functionality shared across input and output features.\n \"\"\"\n\n @abstractstaticmethod\n def type() -> str:\n \"\"\"Returns the type of feature this mixin supports.\"\"\"\n raise NotImplementedError\n\n @abstractstaticmethod\n def preprocessing_defaults() -> Dict[str, Any]:\n \"\"\"Returns dict of preprocessing defaults.\"\"\"\n raise NotImplementedError\n\n @abstractstaticmethod\n def preprocessing_schema() -> Dict[str, Any]:\n \"\"\"Returns schema for the preprocessing configuration.\"\"\"\n raise NotImplementedError\n\n @abstractstaticmethod\n def cast_column(column: DataFrame, backend) -> DataFrame:\n \"\"\"Returns a copy of the dataset column for the given feature, potentially after a type cast.\n\n Args:\n column: Pandas column of values.\n backend: (Union[Backend, str]) Backend to use for feature data processing.\n \"\"\"\n raise NotImplementedError\n\n @abstractstaticmethod\n def get_feature_meta(column: DataFrame, preprocessing_parameters: Dict[str, Any], backend) -> Dict[str, Any]:\n \"\"\"Returns a dictionary of feature metadata.\n\n Args:\n column: Pandas column of values.\n preprocessing_parameters: Preprocessing configuration for this feature.\n backend: (Union[Backend, str]) Backend to use for feature data processing.\n \"\"\"\n raise NotImplementedError\n\n @abstractstaticmethod\n def add_feature_data(\n feature_config: Dict[str, Any],\n input_df: DataFrame,\n proc_df: Dict[str, DataFrame],\n metadata: Dict[str, Any],\n preprocessing_parameters: Dict[str, Any],\n backend, # Union[Backend, str]\n skip_save_processed_input: bool,\n ) -> None:\n \"\"\"Runs preprocessing on the input_df and stores results in the proc_df and metadata 
dictionaries.\n\n Args:\n feature_config: Feature configuration.\n input_df: Pandas column of values.\n proc_df: Dict of processed columns of data. Feature data is added to this.\n metadata: Metadata returned by get_feature_meta(). Additional information may be added to this.\n preprocessing_parameters: Preprocessing configuration for this feature.\n backend: (Union[Backend, str]) Backend to use for feature data processing.\n skip_save_processed_input: Whether to skip saving the processed input.\n \"\"\"\n raise NotImplementedError\n\n\nclass PredictModule(torch.nn.Module):\n \"\"\"Base class for all modules that convert model outputs to predictions.\n\n Explicit member variables needed here for scripting, as Torchscript will not be able to recognize global variables\n during scripting.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.predictions_key = PREDICTIONS\n self.probabilities_key = PROBABILITIES\n self.logits_key = LOGITS\n\n\nclass BaseFeature:\n \"\"\"Base class for all features.\n\n Note that this class is not-cooperative (does not forward kwargs), so when constructing feature class hierarchies,\n there should be only one parent class that derives from base feature. Other functionality should be put into mixin\n classes to avoid the diamond pattern.\n \"\"\"\n\n def __init__(self, feature, *args, **kwargs):\n super().__init__()\n\n if NAME not in feature:\n raise ValueError(\"Missing feature name\")\n self.feature_name = feature[NAME]\n\n if COLUMN not in feature:\n feature[COLUMN] = self.feature_name\n self.column = feature[COLUMN]\n\n if PROC_COLUMN not in feature:\n feature[PROC_COLUMN] = compute_feature_hash(feature)\n self.proc_column = feature[PROC_COLUMN]\n\n def overwrite_defaults(self, feature):\n attributes = set(self.__dict__.keys())\n attributes.update(self.__class__.__dict__.keys())\n\n for k in feature.keys():\n if k in attributes:\n if isinstance(feature[k], dict) and hasattr(self, k) and isinstance(getattr(self, k), dict):\n setattr(self, k, merge_dict(getattr(self, k), feature[k]))\n else:\n setattr(self, k, feature[k])\n\n\nclass InputFeature(BaseFeature, LudwigModule, ABC):\n \"\"\"Parent class for all input features.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def create_sample_input(self):\n # Used by get_model_inputs(), which is used for tracing-based torchscript generation.\n return torch.rand([2, *self.input_shape]).to(self.input_dtype)\n\n @staticmethod\n @abstractmethod\n def update_config_with_metadata(input_feature, feature_metadata, *args, **kwargs):\n pass\n\n @staticmethod\n @abstractmethod\n def populate_defaults(input_feature):\n pass\n\n def initialize_encoder(self, encoder_parameters):\n return get_encoder_cls(self.type(), self.encoder)(**encoder_parameters)\n\n @staticmethod\n def create_preproc_module(metadata: Dict[str, Any]) -> torch.nn.Module:\n raise NotImplementedError(\"Torchscript tracing not supported for feature\")\n\n\nclass OutputFeature(BaseFeature, LudwigModule, ABC):\n \"\"\"Parent class for all output features.\"\"\"\n\n def __init__(self, feature: Dict[str, Any], other_output_features: Dict[str, \"OutputFeature\"], *args, **kwargs):\n \"\"\"Defines defaults, overwrites them based on the feature dictionary, and sets up dependencies.\n\n Any output feature can depend on one or more other output features. 
The `other_output_features` input dictionary\n should contain entries for any dependent output features, which is accomplished by constructing output features\n in topographically sorted order. Attributes of any dependent output features are used to properly initialize\n this feature's sizes.\n \"\"\"\n super().__init__(*args, feature=feature, **kwargs)\n\n self.reduce_input = None\n self.reduce_dependencies = None\n\n # List of feature names that this output feature is depdendent on.\n self.dependencies = []\n\n self.fc_layers = None\n self.num_fc_layers = 0\n self.output_size = 256\n self.use_bias = True\n self.weights_initializer = \"xavier_uniform\"\n self.bias_initializer = \"zeros\"\n self.norm = None\n self.norm_params = None\n self.activation = \"relu\"\n self.dropout = 0\n self.input_size = None\n\n self.overwrite_defaults(feature)\n\n logger.debug(\" output feature fully connected layers\")\n logger.debug(\" FCStack\")\n\n self.input_size = get_input_size_with_dependencies(self.input_size, self.dependencies, other_output_features)\n\n self.fc_stack = FCStack(\n first_layer_input_size=self.input_size,\n layers=self.fc_layers,\n num_layers=self.num_fc_layers,\n default_output_size=self.output_size,\n default_use_bias=self.use_bias,\n default_weights_initializer=self.weights_initializer,\n default_bias_initializer=self.bias_initializer,\n default_norm=self.norm,\n default_norm_params=self.norm_params,\n default_activation=self.activation,\n default_dropout=self.dropout,\n )\n self._prediction_module = self.create_predict_module()\n\n # set up two sequence reducers, one for inputs and other for dependencies\n self.reduce_sequence_input = SequenceReducer(reduce_mode=self.reduce_input)\n if self.dependencies:\n self.dependency_reducers = torch.nn.ModuleDict()\n # todo: re-evaluate need for separate handling of `attention` reducer\n # currently this code does not support `attention`\n for dependency in self.dependencies:\n self.dependency_reducers[dependency] = SequenceReducer(reduce_mode=self.reduce_dependencies)\n\n def create_sample_output(self):\n return torch.rand(self.output_shape, dtype=self.get_output_dtype())\n\n @abstractmethod\n def get_prediction_set(self):\n \"\"\"Returns the set of prediction keys returned by this feature.\"\"\"\n raise NotImplementedError(\"OutputFeature is missing implementation for get_prediction_set.\")\n\n @classmethod\n @abstractmethod\n def get_output_dtype(cls):\n \"\"\"Returns the Tensor data type feature outputs.\"\"\"\n pass\n\n @property\n @abstractmethod\n def metric_functions(self) -> Dict:\n pass\n\n def initialize_decoder(self, decoder_parameters):\n decoder_parameters_copy = copy.copy(decoder_parameters)\n # Input to the decoder is the output feature's FC hidden layer.\n decoder_parameters_copy[\"input_size\"] = self.fc_stack.output_shape[-1]\n if \"decoder\" in decoder_parameters:\n decoder = decoder_parameters[\"decoder\"]\n else:\n decoder = self.decoder\n return get_decoder_cls(self.type(), decoder)(**decoder_parameters_copy)\n\n def train_loss(self, targets: Tensor, predictions: Dict[str, Tensor], feature_name):\n loss_class = type(self.train_loss_function)\n prediction_key = output_feature_utils.get_feature_concat_name(feature_name, loss_class.get_loss_inputs())\n return self.train_loss_function(predictions[prediction_key], targets)\n\n def eval_loss(self, targets: Tensor, predictions: Dict[str, Tensor]):\n loss_class = type(self.train_loss_function)\n prediction_key = loss_class.get_loss_inputs()\n return 
self.eval_loss_function(predictions[prediction_key].detach(), targets)\n\n def _setup_loss(self):\n loss_kwargs = self.loss_kwargs()\n self.train_loss_function = get_loss_cls(self.type(), self.loss[TYPE])(**loss_kwargs)\n self.eval_loss_function = get_metric_cls(self.type(), self.loss[TYPE])(**loss_kwargs)\n\n def _setup_metrics(self):\n # needed to shadow class variable\n self.metric_functions = {\n LOSS: self.eval_loss_function,\n **{\n name: cls(**self.loss_kwargs(), **self.metric_kwargs())\n for name, cls in get_metric_classes(self.type()).items()\n if cls.can_report(self)\n },\n }\n\n @abstractmethod\n def create_predict_module(self) -> PredictModule:\n \"\"\"Creates and returns a `nn.Module` that converts raw model outputs (logits) to predictions.\n\n Thos module is needed when generating the Torchscript model using scripting.\n \"\"\"\n raise NotImplementedError()\n\n @property\n def prediction_module(self) -> PredictModule:\n \"\"\"Returns the PredictModule used to convert model outputs to predictions.\"\"\"\n return self._prediction_module\n\n def predictions(self, all_decoder_outputs: Dict[str, torch.Tensor], feature_name: str) -> Dict[str, torch.Tensor]:\n \"\"\"Computes actual predictions from the outputs of feature decoders.\n\n TODO(Justin): Consider refactoring this to accept feature-specific decoder outputs.\n\n Args:\n all_decoder_outputs: A dictionary of {feature name}::{tensor_name} -> output tensor.\n Returns:\n Dictionary of tensors with predictions as well as any additional tensors that may be\n necessary for computing evaluation metrics.\n \"\"\"\n return self.prediction_module(all_decoder_outputs, feature_name)\n\n @abstractmethod\n def logits(self, combiner_outputs: Dict[str, torch.Tensor], target=None, **kwargs) -> Dict[str, torch.Tensor]:\n \"\"\"Unpacks and feeds combiner_outputs to the decoder. Invoked as part of the output feature's forward pass.\n\n If target is not None, then we are in training.\n\n Args:\n combiner_outputs: Dictionary of tensors from the combiner's forward pass.\n Returns:\n Dictionary of decoder's output tensors (non-normalized), as well as any additional\n tensors that may be necessary for computing predictions or evaluation metrics.\n \"\"\"\n raise NotImplementedError(\"OutputFeature is missing logits() implementation.\")\n\n def loss_kwargs(self) -> Dict[str, Any]:\n \"\"\"Returns arguments that are used to instantiate an instance of the loss class.\"\"\"\n return {}\n\n def metric_kwargs(self) -> Dict[str, Any]:\n \"\"\"Returns arguments that are used to instantiate an instance of each metric class.\"\"\"\n return {}\n\n def update_metrics(self, targets: Tensor, predictions: Dict[str, Tensor]) -> None:\n \"\"\"Updates metrics with the given targets and predictions.\n\n Args:\n targets: Tensor with target values for this output feature.\n predictions: Dict of tensors returned by predictions().\n \"\"\"\n for _, metric_fn in self.metric_functions.items():\n metric_class = type(metric_fn)\n prediction_key = metric_class.get_inputs()\n # TODO(shreya): Metrics should ideally just move to the correct device\n # and not require the user to do this. This is a temporary fix. 
See\n # if this can be removed before merging the PR.\n metric_fn = metric_fn.to(predictions[prediction_key].device)\n metric_fn.update(predictions[prediction_key].detach(), targets)\n\n def get_metrics(self):\n metric_vals = {}\n for metric_name, metric_fn in self.metric_functions.items():\n try:\n metric_vals[metric_name] = get_scalar_from_ludwig_metric(metric_fn)\n except Exception as e:\n logger.error(f\"Caught exception computing metric: {metric_name}. Exception: {e}\")\n return metric_vals\n\n def reset_metrics(self):\n for _, metric_fn in self.metric_functions.items():\n if metric_fn is not None:\n metric_fn.reset()\n\n def forward(\n self,\n combiner_outputs: Dict[str, torch.Tensor],\n other_output_feature_outputs: Dict[str, torch.Tensor],\n mask: Optional[torch.Tensor] = None,\n target: Optional[torch.Tensor] = None,\n ) -> Dict[str, torch.Tensor]:\n \"\"\"Forward pass that takes in output from the combiner, and passes it through to the decoder.\n\n Args:\n combiner_outputs: Dict of outputs from the combiner.\n other_output_feature_outputs: Dict of tensors from other output features. Used for resolving dependencies.\n mask: (Unused). Tensor for masking.\n target: Tensor with targets. During training, targets != None. During prediction, targets = None.\n\n Returns:\n Dict of output tensors, with at least 'last_hidden' and 'logits' as keys, as well as any additional tensor\n results from the decoder.\n \"\"\"\n # extract the combined hidden layer\n combiner_hidden = combiner_outputs[\"combiner_output\"]\n hidden = self.prepare_decoder_inputs(combiner_hidden, other_output_feature_outputs, mask=mask)\n\n # ================ Predictions ================\n logits_input = {HIDDEN: hidden}\n # pass supplemental data from encoders to decoder\n if \"encoder_output_state\" in combiner_outputs:\n logits_input[\"encoder_output_state\"] = combiner_outputs[\"encoder_output_state\"]\n if LENGTHS in combiner_outputs:\n logits_input[LENGTHS] = combiner_outputs[LENGTHS]\n\n logits = self.logits(logits_input, target=target)\n\n # For binary and numerical features, self.logits() is a tensor.\n # There are two special cases where self.logits() is a dict:\n # categorical\n # keys: logits, projection_input\n # sequence\n # keys: logits\n # TODO(Justin): Clean this up.\n if isinstance(logits, Tensor):\n logits = {\"logits\": logits}\n\n # For multi-class features, we must choose a consistent tuple subset.\n return {\n # last_hidden used for dependencies processing\n \"last_hidden\": hidden,\n **logits,\n }\n\n def overall_statistics_metadata(self):\n \"\"\"Additional metadata used to extend `training_set_metadata`.\n\n Used when calculating the overall statistics.\n \"\"\"\n return {}\n\n @property\n @abstractmethod\n def default_validation_metric(self):\n pass\n\n @abstractmethod\n def postprocess_predictions(\n self,\n result: Dict[str, Tensor],\n metadata: Dict[str, Any],\n output_directory: str,\n backend,\n ):\n raise NotImplementedError\n\n @staticmethod\n def create_postproc_module(metadata: Dict[str, Any]) -> torch.nn.Module:\n raise NotImplementedError(\"Torchscript tracing not supported for feature\")\n\n @staticmethod\n @abstractmethod\n def update_config_with_metadata(output_feature, feature_metadata, *args, **kwargs):\n pass\n\n @staticmethod\n @abstractmethod\n def calculate_overall_stats(predictions, targets, train_set_metadata):\n pass\n\n @staticmethod\n @abstractmethod\n def populate_defaults(input_feature):\n pass\n\n def output_specific_fully_connected(self, inputs, mask=None):\n 
feature_hidden = inputs\n original_feature_hidden = inputs\n\n # flatten inputs\n if len(original_feature_hidden.shape) > 2:\n feature_hidden = torch.reshape(feature_hidden, (-1, list(feature_hidden.shape)[-1]))\n\n # pass it through fc_stack\n feature_hidden = self.fc_stack(feature_hidden, mask=mask)\n feature_hidden_size = feature_hidden.shape[-1]\n\n # reshape back to original first and second dimension\n if len(original_feature_hidden.shape) > 2:\n sequence_length = original_feature_hidden.shape[1]\n feature_hidden = torch.reshape(feature_hidden, (-1, sequence_length, feature_hidden_size))\n\n return feature_hidden\n\n def prepare_decoder_inputs(\n self, combiner_hidden: Tensor, other_output_features: Dict[str, Tensor], mask=None\n ) -> Tensor:\n \"\"\"Takes the combiner output and the outputs of other outputs features computed so far and performs:\n\n - reduction of combiner outputs (if needed)\n - concatenating the outputs of dependent features (if needed)\n - output_specific fully connected layers (if needed)\n\n Args:\n combiner_hidden: hidden state of the combiner\n other_output_features: output tensors from other output features\n \"\"\"\n # ================ Reduce Inputs ================\n feature_hidden = combiner_hidden\n if self.reduce_input is not None and len(combiner_hidden.shape) > 2:\n feature_hidden = self.reduce_sequence_input(combiner_hidden)\n\n # ================ Concat Dependencies ================\n if self.dependencies:\n feature_hidden = output_feature_utils.concat_dependencies(\n self.column, self.dependencies, self.dependency_reducers, feature_hidden, other_output_features\n )\n\n # ================ Output-wise Fully Connected ================\n feature_hidden = self.output_specific_fully_connected(feature_hidden, mask=mask)\n\n return feature_hidden\n\n def flatten(self, df: DataFrame) -> DataFrame:\n \"\"\"Converts the output of batch_predict to a 1D array.\"\"\"\n return df\n\n def unflatten(self, df: DataFrame) -> DataFrame:\n \"\"\"Reshapes a flattened 1D array into its original shape.\"\"\"\n return df\n",
"path": "ludwig/features/base_feature.py"
}
] | [
{
"content": "# Copyright (c) 2019 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport copy\nimport logging\nfrom abc import ABC, abstractmethod, abstractstaticmethod\nfrom typing import Any, Dict, Optional\n\nimport torch\nfrom torch import Tensor\n\nfrom ludwig.constants import COLUMN, HIDDEN, LENGTHS, LOGITS, LOSS, NAME, PREDICTIONS, PROBABILITIES, PROC_COLUMN, TYPE\nfrom ludwig.decoders.registry import get_decoder_cls\nfrom ludwig.encoders.registry import get_encoder_cls\nfrom ludwig.features.feature_utils import compute_feature_hash, get_input_size_with_dependencies\nfrom ludwig.modules.fully_connected_modules import FCStack\nfrom ludwig.modules.loss_modules import get_loss_cls\nfrom ludwig.modules.metric_registry import get_metric_classes, get_metric_cls\nfrom ludwig.modules.reduction_modules import SequenceReducer\nfrom ludwig.utils import output_feature_utils\nfrom ludwig.utils.metric_utils import get_scalar_from_ludwig_metric\nfrom ludwig.utils.misc_utils import merge_dict\nfrom ludwig.utils.torch_utils import LudwigModule\nfrom ludwig.utils.types import DataFrame\n\nlogger = logging.getLogger(__name__)\n\n\nclass BaseFeatureMixin(ABC):\n \"\"\"Parent class for feature mixins.\n\n Feature mixins support preprocessing functionality shared across input and output features.\n \"\"\"\n\n @abstractstaticmethod\n def type() -> str:\n \"\"\"Returns the type of feature this mixin supports.\"\"\"\n raise NotImplementedError\n\n @abstractstaticmethod\n def preprocessing_defaults() -> Dict[str, Any]:\n \"\"\"Returns dict of preprocessing defaults.\"\"\"\n raise NotImplementedError\n\n @abstractstaticmethod\n def preprocessing_schema() -> Dict[str, Any]:\n \"\"\"Returns schema for the preprocessing configuration.\"\"\"\n raise NotImplementedError\n\n @abstractstaticmethod\n def cast_column(column: DataFrame, backend) -> DataFrame:\n \"\"\"Returns a copy of the dataset column for the given feature, potentially after a type cast.\n\n Args:\n column: Pandas column of values.\n backend: (Union[Backend, str]) Backend to use for feature data processing.\n \"\"\"\n raise NotImplementedError\n\n @abstractstaticmethod\n def get_feature_meta(column: DataFrame, preprocessing_parameters: Dict[str, Any], backend) -> Dict[str, Any]:\n \"\"\"Returns a dictionary of feature metadata.\n\n Args:\n column: Pandas column of values.\n preprocessing_parameters: Preprocessing configuration for this feature.\n backend: (Union[Backend, str]) Backend to use for feature data processing.\n \"\"\"\n raise NotImplementedError\n\n @abstractstaticmethod\n def add_feature_data(\n feature_config: Dict[str, Any],\n input_df: DataFrame,\n proc_df: Dict[str, DataFrame],\n metadata: Dict[str, Any],\n preprocessing_parameters: Dict[str, Any],\n backend, # Union[Backend, str]\n skip_save_processed_input: bool,\n ) -> None:\n \"\"\"Runs preprocessing on the input_df and stores results in the proc_df and metadata 
dictionaries.\n\n Args:\n feature_config: Feature configuration.\n input_df: Pandas column of values.\n proc_df: Dict of processed columns of data. Feature data is added to this.\n metadata: Metadata returned by get_feature_meta(). Additional information may be added to this.\n preprocessing_parameters: Preprocessing configuration for this feature.\n backend: (Union[Backend, str]) Backend to use for feature data processing.\n skip_save_processed_input: Whether to skip saving the processed input.\n \"\"\"\n raise NotImplementedError\n\n\nclass PredictModule(torch.nn.Module):\n \"\"\"Base class for all modules that convert model outputs to predictions.\n\n Explicit member variables needed here for scripting, as Torchscript will not be able to recognize global variables\n during scripting.\n \"\"\"\n\n def __init__(self):\n super().__init__()\n self.predictions_key = PREDICTIONS\n self.probabilities_key = PROBABILITIES\n self.logits_key = LOGITS\n\n\nclass BaseFeature:\n \"\"\"Base class for all features.\n\n Note that this class is not-cooperative (does not forward kwargs), so when constructing feature class hierarchies,\n there should be only one parent class that derives from base feature. Other functionality should be put into mixin\n classes to avoid the diamond pattern.\n \"\"\"\n\n def __init__(self, feature, *args, **kwargs):\n super().__init__()\n\n if NAME not in feature:\n raise ValueError(\"Missing feature name\")\n self.feature_name = feature[NAME]\n\n if COLUMN not in feature:\n feature[COLUMN] = self.feature_name\n self.column = feature[COLUMN]\n\n if PROC_COLUMN not in feature:\n feature[PROC_COLUMN] = compute_feature_hash(feature)\n self.proc_column = feature[PROC_COLUMN]\n\n def overwrite_defaults(self, feature):\n attributes = set(self.__dict__.keys())\n attributes.update(self.__class__.__dict__.keys())\n\n for k in feature.keys():\n if k in attributes:\n if isinstance(feature[k], dict) and hasattr(self, k) and isinstance(getattr(self, k), dict):\n setattr(self, k, merge_dict(getattr(self, k), feature[k]))\n else:\n setattr(self, k, feature[k])\n\n\nclass InputFeature(BaseFeature, LudwigModule, ABC):\n \"\"\"Parent class for all input features.\"\"\"\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n def create_sample_input(self):\n # Used by get_model_inputs(), which is used for tracing-based torchscript generation.\n return torch.rand([2, *self.input_shape]).to(self.input_dtype)\n\n @staticmethod\n @abstractmethod\n def update_config_with_metadata(input_feature, feature_metadata, *args, **kwargs):\n pass\n\n @staticmethod\n @abstractmethod\n def populate_defaults(input_feature):\n pass\n\n def initialize_encoder(self, encoder_parameters):\n return get_encoder_cls(self.type(), self.encoder)(**encoder_parameters)\n\n @staticmethod\n def create_preproc_module(metadata: Dict[str, Any]) -> torch.nn.Module:\n raise NotImplementedError(\"Torchscript tracing not supported for feature\")\n\n\nclass OutputFeature(BaseFeature, LudwigModule, ABC):\n \"\"\"Parent class for all output features.\"\"\"\n\n def __init__(self, feature: Dict[str, Any], other_output_features: Dict[str, \"OutputFeature\"], *args, **kwargs):\n \"\"\"Defines defaults, overwrites them based on the feature dictionary, and sets up dependencies.\n\n Any output feature can depend on one or more other output features. 
The `other_output_features` input dictionary\n should contain entries for any dependent output features, which is accomplished by constructing output features\n in topographically sorted order. Attributes of any dependent output features are used to properly initialize\n this feature's sizes.\n \"\"\"\n super().__init__(*args, feature=feature, **kwargs)\n\n self.reduce_input = None\n self.reduce_dependencies = None\n\n # List of feature names that this output feature is depdendent on.\n self.dependencies = []\n\n self.fc_layers = None\n self.num_fc_layers = 0\n self.output_size = 256\n self.use_bias = True\n self.weights_initializer = \"xavier_uniform\"\n self.bias_initializer = \"zeros\"\n self.norm = None\n self.norm_params = None\n self.activation = \"relu\"\n self.dropout = 0\n self.input_size = None\n\n self.overwrite_defaults(feature)\n\n logger.debug(\" output feature fully connected layers\")\n logger.debug(\" FCStack\")\n\n self.input_size = get_input_size_with_dependencies(self.input_size, self.dependencies, other_output_features)\n feature[\"input_size\"] = self.input_size # needed for future overrides\n\n self.fc_stack = FCStack(\n first_layer_input_size=self.input_size,\n layers=self.fc_layers,\n num_layers=self.num_fc_layers,\n default_output_size=self.output_size,\n default_use_bias=self.use_bias,\n default_weights_initializer=self.weights_initializer,\n default_bias_initializer=self.bias_initializer,\n default_norm=self.norm,\n default_norm_params=self.norm_params,\n default_activation=self.activation,\n default_dropout=self.dropout,\n )\n self._prediction_module = self.create_predict_module()\n\n # set up two sequence reducers, one for inputs and other for dependencies\n self.reduce_sequence_input = SequenceReducer(reduce_mode=self.reduce_input)\n if self.dependencies:\n self.dependency_reducers = torch.nn.ModuleDict()\n # todo: re-evaluate need for separate handling of `attention` reducer\n # currently this code does not support `attention`\n for dependency in self.dependencies:\n self.dependency_reducers[dependency] = SequenceReducer(reduce_mode=self.reduce_dependencies)\n\n def create_sample_output(self):\n return torch.rand(self.output_shape, dtype=self.get_output_dtype())\n\n @abstractmethod\n def get_prediction_set(self):\n \"\"\"Returns the set of prediction keys returned by this feature.\"\"\"\n raise NotImplementedError(\"OutputFeature is missing implementation for get_prediction_set.\")\n\n @classmethod\n @abstractmethod\n def get_output_dtype(cls):\n \"\"\"Returns the Tensor data type feature outputs.\"\"\"\n pass\n\n @property\n @abstractmethod\n def metric_functions(self) -> Dict:\n pass\n\n def initialize_decoder(self, decoder_parameters):\n decoder_parameters_copy = copy.copy(decoder_parameters)\n # Input to the decoder is the output feature's FC hidden layer.\n decoder_parameters_copy[\"input_size\"] = self.fc_stack.output_shape[-1]\n if \"decoder\" in decoder_parameters:\n decoder = decoder_parameters[\"decoder\"]\n else:\n decoder = self.decoder\n return get_decoder_cls(self.type(), decoder)(**decoder_parameters_copy)\n\n def train_loss(self, targets: Tensor, predictions: Dict[str, Tensor], feature_name):\n loss_class = type(self.train_loss_function)\n prediction_key = output_feature_utils.get_feature_concat_name(feature_name, loss_class.get_loss_inputs())\n return self.train_loss_function(predictions[prediction_key], targets)\n\n def eval_loss(self, targets: Tensor, predictions: Dict[str, Tensor]):\n loss_class = type(self.train_loss_function)\n 
prediction_key = loss_class.get_loss_inputs()\n return self.eval_loss_function(predictions[prediction_key].detach(), targets)\n\n def _setup_loss(self):\n loss_kwargs = self.loss_kwargs()\n self.train_loss_function = get_loss_cls(self.type(), self.loss[TYPE])(**loss_kwargs)\n self.eval_loss_function = get_metric_cls(self.type(), self.loss[TYPE])(**loss_kwargs)\n\n def _setup_metrics(self):\n # needed to shadow class variable\n self.metric_functions = {\n LOSS: self.eval_loss_function,\n **{\n name: cls(**self.loss_kwargs(), **self.metric_kwargs())\n for name, cls in get_metric_classes(self.type()).items()\n if cls.can_report(self)\n },\n }\n\n @abstractmethod\n def create_predict_module(self) -> PredictModule:\n \"\"\"Creates and returns a `nn.Module` that converts raw model outputs (logits) to predictions.\n\n Thos module is needed when generating the Torchscript model using scripting.\n \"\"\"\n raise NotImplementedError()\n\n @property\n def prediction_module(self) -> PredictModule:\n \"\"\"Returns the PredictModule used to convert model outputs to predictions.\"\"\"\n return self._prediction_module\n\n def predictions(self, all_decoder_outputs: Dict[str, torch.Tensor], feature_name: str) -> Dict[str, torch.Tensor]:\n \"\"\"Computes actual predictions from the outputs of feature decoders.\n\n TODO(Justin): Consider refactoring this to accept feature-specific decoder outputs.\n\n Args:\n all_decoder_outputs: A dictionary of {feature name}::{tensor_name} -> output tensor.\n Returns:\n Dictionary of tensors with predictions as well as any additional tensors that may be\n necessary for computing evaluation metrics.\n \"\"\"\n return self.prediction_module(all_decoder_outputs, feature_name)\n\n @abstractmethod\n def logits(self, combiner_outputs: Dict[str, torch.Tensor], target=None, **kwargs) -> Dict[str, torch.Tensor]:\n \"\"\"Unpacks and feeds combiner_outputs to the decoder. Invoked as part of the output feature's forward pass.\n\n If target is not None, then we are in training.\n\n Args:\n combiner_outputs: Dictionary of tensors from the combiner's forward pass.\n Returns:\n Dictionary of decoder's output tensors (non-normalized), as well as any additional\n tensors that may be necessary for computing predictions or evaluation metrics.\n \"\"\"\n raise NotImplementedError(\"OutputFeature is missing logits() implementation.\")\n\n def loss_kwargs(self) -> Dict[str, Any]:\n \"\"\"Returns arguments that are used to instantiate an instance of the loss class.\"\"\"\n return {}\n\n def metric_kwargs(self) -> Dict[str, Any]:\n \"\"\"Returns arguments that are used to instantiate an instance of each metric class.\"\"\"\n return {}\n\n def update_metrics(self, targets: Tensor, predictions: Dict[str, Tensor]) -> None:\n \"\"\"Updates metrics with the given targets and predictions.\n\n Args:\n targets: Tensor with target values for this output feature.\n predictions: Dict of tensors returned by predictions().\n \"\"\"\n for _, metric_fn in self.metric_functions.items():\n metric_class = type(metric_fn)\n prediction_key = metric_class.get_inputs()\n # TODO(shreya): Metrics should ideally just move to the correct device\n # and not require the user to do this. This is a temporary fix. 
See\n # if this can be removed before merging the PR.\n metric_fn = metric_fn.to(predictions[prediction_key].device)\n metric_fn.update(predictions[prediction_key].detach(), targets)\n\n def get_metrics(self):\n metric_vals = {}\n for metric_name, metric_fn in self.metric_functions.items():\n try:\n metric_vals[metric_name] = get_scalar_from_ludwig_metric(metric_fn)\n except Exception as e:\n logger.error(f\"Caught exception computing metric: {metric_name}. Exception: {e}\")\n return metric_vals\n\n def reset_metrics(self):\n for _, metric_fn in self.metric_functions.items():\n if metric_fn is not None:\n metric_fn.reset()\n\n def forward(\n self,\n combiner_outputs: Dict[str, torch.Tensor],\n other_output_feature_outputs: Dict[str, torch.Tensor],\n mask: Optional[torch.Tensor] = None,\n target: Optional[torch.Tensor] = None,\n ) -> Dict[str, torch.Tensor]:\n \"\"\"Forward pass that takes in output from the combiner, and passes it through to the decoder.\n\n Args:\n combiner_outputs: Dict of outputs from the combiner.\n other_output_feature_outputs: Dict of tensors from other output features. Used for resolving dependencies.\n mask: (Unused). Tensor for masking.\n target: Tensor with targets. During training, targets != None. During prediction, targets = None.\n\n Returns:\n Dict of output tensors, with at least 'last_hidden' and 'logits' as keys, as well as any additional tensor\n results from the decoder.\n \"\"\"\n # extract the combined hidden layer\n combiner_hidden = combiner_outputs[\"combiner_output\"]\n hidden = self.prepare_decoder_inputs(combiner_hidden, other_output_feature_outputs, mask=mask)\n\n # ================ Predictions ================\n logits_input = {HIDDEN: hidden}\n # pass supplemental data from encoders to decoder\n if \"encoder_output_state\" in combiner_outputs:\n logits_input[\"encoder_output_state\"] = combiner_outputs[\"encoder_output_state\"]\n if LENGTHS in combiner_outputs:\n logits_input[LENGTHS] = combiner_outputs[LENGTHS]\n\n logits = self.logits(logits_input, target=target)\n\n # For binary and numerical features, self.logits() is a tensor.\n # There are two special cases where self.logits() is a dict:\n # categorical\n # keys: logits, projection_input\n # sequence\n # keys: logits\n # TODO(Justin): Clean this up.\n if isinstance(logits, Tensor):\n logits = {\"logits\": logits}\n\n # For multi-class features, we must choose a consistent tuple subset.\n return {\n # last_hidden used for dependencies processing\n \"last_hidden\": hidden,\n **logits,\n }\n\n def overall_statistics_metadata(self):\n \"\"\"Additional metadata used to extend `training_set_metadata`.\n\n Used when calculating the overall statistics.\n \"\"\"\n return {}\n\n @property\n @abstractmethod\n def default_validation_metric(self):\n pass\n\n @abstractmethod\n def postprocess_predictions(\n self,\n result: Dict[str, Tensor],\n metadata: Dict[str, Any],\n output_directory: str,\n backend,\n ):\n raise NotImplementedError\n\n @staticmethod\n def create_postproc_module(metadata: Dict[str, Any]) -> torch.nn.Module:\n raise NotImplementedError(\"Torchscript tracing not supported for feature\")\n\n @staticmethod\n @abstractmethod\n def update_config_with_metadata(output_feature, feature_metadata, *args, **kwargs):\n pass\n\n @staticmethod\n @abstractmethod\n def calculate_overall_stats(predictions, targets, train_set_metadata):\n pass\n\n @staticmethod\n @abstractmethod\n def populate_defaults(input_feature):\n pass\n\n def output_specific_fully_connected(self, inputs, mask=None):\n 
feature_hidden = inputs\n original_feature_hidden = inputs\n\n # flatten inputs\n if len(original_feature_hidden.shape) > 2:\n feature_hidden = torch.reshape(feature_hidden, (-1, list(feature_hidden.shape)[-1]))\n\n # pass it through fc_stack\n feature_hidden = self.fc_stack(feature_hidden, mask=mask)\n feature_hidden_size = feature_hidden.shape[-1]\n\n # reshape back to original first and second dimension\n if len(original_feature_hidden.shape) > 2:\n sequence_length = original_feature_hidden.shape[1]\n feature_hidden = torch.reshape(feature_hidden, (-1, sequence_length, feature_hidden_size))\n\n return feature_hidden\n\n def prepare_decoder_inputs(\n self, combiner_hidden: Tensor, other_output_features: Dict[str, Tensor], mask=None\n ) -> Tensor:\n \"\"\"Takes the combiner output and the outputs of other outputs features computed so far and performs:\n\n - reduction of combiner outputs (if needed)\n - concatenating the outputs of dependent features (if needed)\n - output_specific fully connected layers (if needed)\n\n Args:\n combiner_hidden: hidden state of the combiner\n other_output_features: output tensors from other output features\n \"\"\"\n # ================ Reduce Inputs ================\n feature_hidden = combiner_hidden\n if self.reduce_input is not None and len(combiner_hidden.shape) > 2:\n feature_hidden = self.reduce_sequence_input(combiner_hidden)\n\n # ================ Concat Dependencies ================\n if self.dependencies:\n feature_hidden = output_feature_utils.concat_dependencies(\n self.column, self.dependencies, self.dependency_reducers, feature_hidden, other_output_features\n )\n\n # ================ Output-wise Fully Connected ================\n feature_hidden = self.output_specific_fully_connected(feature_hidden, mask=mask)\n\n return feature_hidden\n\n def flatten(self, df: DataFrame) -> DataFrame:\n \"\"\"Converts the output of batch_predict to a 1D array.\"\"\"\n return df\n\n def unflatten(self, df: DataFrame) -> DataFrame:\n \"\"\"Reshapes a flattened 1D array into its original shape.\"\"\"\n return df\n",
"path": "ludwig/features/base_feature.py"
}
] | diff --git a/ludwig/features/base_feature.py b/ludwig/features/base_feature.py
index 1d7d2c5a1e1..251c3131daa 100644
--- a/ludwig/features/base_feature.py
+++ b/ludwig/features/base_feature.py
@@ -217,6 +217,7 @@ def __init__(self, feature: Dict[str, Any], other_output_features: Dict[str, "Ou
logger.debug(" FCStack")
self.input_size = get_input_size_with_dependencies(self.input_size, self.dependencies, other_output_features)
+ feature["input_size"] = self.input_size # needed for future overrides
self.fc_stack = FCStack(
first_layer_input_size=self.input_size,
| Shape mismatch when introducing multiple levels of dependencies
**Describe the bug**
When introducing multiple levels of dependencies, the shape of the _concatenated hidden states_ does not match the _input size for the dense layer of the output feature_.
In my case, the text output feature `qty_frac` depends on the text output feature `summary`, and the numerical output feature `qty` in turn depends on `qty_frac`.
I get the following error when running `ludwig train`:
```python-traceback
RuntimeError: mat1 and mat2 shapes cannot be multiplied (6x768 and 512x1)
```
**To Reproduce**
Minimal, reproducible example using bash and docker as the only dependencies:
```bash
#!/usr/bin/env bash
FEATURE_LIST=$(
docker run -i mikefarah/yq -o json -I 0 e '.' - <<EOF
- name: document
type: text
- name: summary
type: text
- name: qty_frac
type: text
- name: qty
type: numerical
EOF
)
mkdir /tmp/ludwig-debug
docker run \
-it \
-v /tmp/ludwig-debug/:/workdir \
ludwigai/ludwig:nightly \
synthesize_dataset \
--features $FEATURE_LIST \
--dataset_size 10 \
--output_path /workdir/synthetic_data.csv
cat <<EOF >/tmp/ludwig-debug/config.yml
input_features:
- name: document
type: text
level: word
output_features:
- name: summary
type: text
level: word
decoder: generator
- name: qty_frac
type: text
level: word
decoder: generator
dependencies:
- summary
- name: qty
type: numerical
dependencies:
- qty_frac
EOF
docker run \
-it \
-v /tmp/ludwig-debug/:/workdir \
ludwigai/ludwig:nightly \
train \
--dataset /workdir/synthetic_data.csv \
--config_file /workdir/config.yml \
--output_directory /workdir/results
```
**Expected behavior**
Training starts without error.
**Screenshots**
Excerpt from the traceback:
```python-traceback
File "/usr/local/lib/python3.7/site-packages/ludwig/features/numerical_feature.py", line 269, in logits
return self.decoder_obj(hidden)
File "/usr/local/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.7/site-packages/ludwig/decoders/generic_decoders.py", line 58, in forward
return self.dense(inputs)
File "/usr/local/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.7/site-packages/ludwig/utils/torch_utils.py", line 212, in forward
output = torch.squeeze(self.dense(input), dim=-1)
File "/usr/local/lib/python3.7/site-packages/torch/nn/modules/module.py", line 1102, in _call_impl
return forward_call(*input, **kwargs)
File "/usr/local/lib/python3.7/site-packages/torch/nn/modules/linear.py", line 103, in forward
return F.linear(input, self.weight, self.bias)
File "/usr/local/lib/python3.7/site-packages/torch/nn/functional.py", line 1848, in linear
return torch._C._nn.linear(input, weight, bias)
RuntimeError: mat1 and mat2 shapes cannot be multiplied (6x768 and 512x1)
```
**Environment:**
See the reproducible example above, run in an environment with:
- bash: `GNU bash, version 5.0.17(1)-release (x86_64-pc-linux-gnu)`
- docker: `Docker version 20.10.11+azure-3, build dea9396e184290f638ea873c76db7c80efd5a1d2`
The `ludwigai/ludwig:nightly` Docker image was built from main at 89d18365c41c4ded68edd2095349ce4a6caf5d18.
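For context on why the one-line write-back in the patch above helps: each output feature's fully connected stack is sized from the combiner hidden state plus the hidden states of its dependencies, so when dependencies chain (`qty` depends on `qty_frac`, which depends on `summary`), the widened size of an intermediate feature must be visible to every feature built after it. Below is a minimal, self-contained sketch of that bookkeeping, not Ludwig's actual code; the feature dicts and the 256-wide hidden size are assumptions chosen to reproduce the 512 vs. 768 mismatch from the traceback.
```python
from typing import Dict, List


def get_input_size_with_dependencies(
    own_input_size: int,
    dependencies: List[str],
    built_features: Dict[str, dict],
) -> int:
    """Widen a feature's input size by the input sizes of the features it depends on."""
    total = own_input_size
    for dep in dependencies:
        total += built_features[dep]["input_size"]
    return total


# Hypothetical feature configs; 256 stands in for the combiner hidden size.
features = [
    {"name": "summary", "input_size": 256, "dependencies": []},
    {"name": "qty_frac", "input_size": 256, "dependencies": ["summary"]},
    {"name": "qty", "input_size": 256, "dependencies": ["qty_frac"]},
]

built: Dict[str, dict] = {}
for feature in features:
    widened = get_input_size_with_dependencies(
        feature["input_size"], feature["dependencies"], built
    )
    feature["input_size"] = widened  # the write-back the patch adds
    built[feature["name"]] = feature
    print(feature["name"], "expects", widened, "inputs")

# summary expects 256, qty_frac expects 512, qty expects 768.
# Without the write-back, qty would be built for 256 + 256 = 512 inputs while the
# concatenated hidden state it actually receives is 768 wide, which is exactly the
# "6x768 and 512x1" shape mismatch reported above.
```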
|
pulp__pulpcore-4010 | [
{
"content": "\"\"\"\nViewSet for replicating repositories and distributions from an upstream Pulp\n\"\"\"\nfrom django.conf import settings\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework import mixins\nfrom rest_framework.decorators import action\n\nfrom pulpcore.app.models import TaskGroup, UpstreamPulp\nfrom pulpcore.app.serializers import AsyncOperationResponseSerializer, UpstreamPulpSerializer\nfrom pulpcore.app.viewsets import NamedModelViewSet\nfrom pulpcore.app.response import TaskGroupOperationResponse\nfrom pulpcore.app.tasks import replicate_distributions\nfrom pulpcore.tasking.tasks import dispatch\n\n\nclass UpstreamPulpViewSet(\n NamedModelViewSet,\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n mixins.DestroyModelMixin,\n mixins.UpdateModelMixin,\n):\n \"\"\"API for configuring an upstream Pulp to replicate. This API is provided as a tech preview.\"\"\"\n\n queryset = UpstreamPulp.objects.all()\n endpoint_name = \"upstream-pulps\"\n serializer_class = UpstreamPulpSerializer\n ordering = \"-pulp_created\"\n\n @extend_schema(\n summary=\"Replicate\",\n description=\"Trigger an asynchronous repository replication task group. This API is \"\n \"provided as a tech preview.\",\n responses={202: AsyncOperationResponseSerializer},\n )\n @action(detail=True, methods=[\"post\"])\n def replicate(self, request, pk):\n \"\"\"\n Triggers an asynchronous repository replication operation.\n \"\"\"\n server = UpstreamPulp.objects.get(pk=pk)\n task_group = TaskGroup.objects.create(description=f\"Replication of {server.name}\")\n\n uri = \"/api/v3/servers/\"\n if settings.DOMAIN_ENABLED:\n uri = f\"/{request.domain.name}{uri}\"\n\n dispatch(\n replicate_distributions,\n exclusive_resources=[uri],\n kwargs={\"server_pk\": pk},\n task_group=task_group,\n )\n\n return TaskGroupOperationResponse(task_group, request)\n",
"path": "pulpcore/app/viewsets/replica.py"
}
] | [
{
"content": "\"\"\"\nViewSet for replicating repositories and distributions from an upstream Pulp\n\"\"\"\nfrom django.conf import settings\nfrom drf_spectacular.utils import extend_schema\nfrom rest_framework import mixins\nfrom rest_framework.decorators import action\n\nfrom pulpcore.app.models import TaskGroup, UpstreamPulp\nfrom pulpcore.app.serializers import AsyncOperationResponseSerializer, UpstreamPulpSerializer\nfrom pulpcore.app.viewsets import NamedModelViewSet\nfrom pulpcore.app.response import TaskGroupOperationResponse\nfrom pulpcore.app.tasks import replicate_distributions\nfrom pulpcore.tasking.tasks import dispatch\n\n\nclass UpstreamPulpViewSet(\n NamedModelViewSet,\n mixins.CreateModelMixin,\n mixins.RetrieveModelMixin,\n mixins.ListModelMixin,\n mixins.DestroyModelMixin,\n mixins.UpdateModelMixin,\n):\n \"\"\"API for configuring an upstream Pulp to replicate. This API is provided as a tech preview.\"\"\"\n\n queryset = UpstreamPulp.objects.all()\n endpoint_name = \"upstream-pulps\"\n serializer_class = UpstreamPulpSerializer\n ordering = \"-pulp_created\"\n\n @extend_schema(\n summary=\"Replicate\",\n description=\"Trigger an asynchronous repository replication task group. This API is \"\n \"provided as a tech preview.\",\n request=None,\n responses={202: AsyncOperationResponseSerializer},\n )\n @action(detail=True, methods=[\"post\"])\n def replicate(self, request, pk):\n \"\"\"\n Triggers an asynchronous repository replication operation.\n \"\"\"\n server = UpstreamPulp.objects.get(pk=pk)\n task_group = TaskGroup.objects.create(description=f\"Replication of {server.name}\")\n\n uri = \"/api/v3/servers/\"\n if settings.DOMAIN_ENABLED:\n uri = f\"/{request.domain.name}{uri}\"\n\n dispatch(\n replicate_distributions,\n exclusive_resources=[uri],\n kwargs={\"server_pk\": pk},\n task_group=task_group,\n )\n\n return TaskGroupOperationResponse(task_group, request)\n",
"path": "pulpcore/app/viewsets/replica.py"
}
] | diff --git a/CHANGES/3995.bugfix b/CHANGES/3995.bugfix
new file mode 100644
index 0000000000..acfdd5ad32
--- /dev/null
+++ b/CHANGES/3995.bugfix
@@ -0,0 +1 @@
+Fix api schema of the `upstream_pulp_replicate` operation requiring no body.
diff --git a/pulpcore/app/viewsets/replica.py b/pulpcore/app/viewsets/replica.py
index 5d9d8aa8b7..666f671b38 100644
--- a/pulpcore/app/viewsets/replica.py
+++ b/pulpcore/app/viewsets/replica.py
@@ -33,6 +33,7 @@ class UpstreamPulpViewSet(
summary="Replicate",
description="Trigger an asynchronous repository replication task group. This API is "
"provided as a tech preview.",
+ request=None,
responses={202: AsyncOperationResponseSerializer},
)
@action(detail=True, methods=["post"])
| REST API document fix for Upstream Pulp Replication API
**Version**
Pulp was installed through the Python modules:
"core:3.28.0"
"certguard:3.28.0"
"file:3.28.0"
"python:3.28.0"
"rpm:3.28.0"
**Describe the bug**
Why are the attributes of **upstream_pulps_create**/**update** mentioned again in the **upstream_pulps_replicate** document? Are those attributes (base_url, api_root, domain, ...) actually used when making an API request to "https://PULP-SERVER/pulp/api/v3/upstream_pulps/{object_id}/replicate/"?
**To Reproduce**
None.
**Expected behavior**
A fix is required in the REST API document.
**Additional context**
Create Upstream Pulp API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_create
Upstream Replication API document: https://docs.pulpproject.org/pulpcore/restapi.html#tag/Upstream-Pulps/operation/upstream_pulps_replicate
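The accompanying patch resolves this by passing `request=None` to drf-spectacular's `extend_schema`, which marks the action as accepting no request body, so the create/update serializer fields stop being repeated under the replicate operation. A minimal sketch of the pattern on a plain DRF viewset follows; the viewset name, summary text, and 202 response description are placeholders, not Pulp's real code.
```python
from drf_spectacular.utils import OpenApiResponse, extend_schema
from rest_framework import viewsets
from rest_framework.decorators import action
from rest_framework.response import Response


class ExampleViewSet(viewsets.ViewSet):
    @extend_schema(
        summary="Trigger an asynchronous operation",
        request=None,  # no request body, so the schema documents no input fields
        responses={202: OpenApiResponse(description="Task dispatched")},
    )
    @action(detail=True, methods=["post"])
    def replicate(self, request, pk=None):
        # Placeholder body: the real view dispatches a replication task group
        # and returns an async-operation response with status 202.
        return Response(status=202)
```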
|
searx__searx-672 | [
{
"content": "'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2015 by Adam Tauber, <[email protected]>\n'''\nfrom flask_babel import gettext\nimport re\nname = \"Self Informations\"\ndescription = gettext('Displays your IP if the query is \"ip\" and your user agent if the query contains \"user agent\".')\ndefault_on = True\n\n\n# Self User Agent regex\np = re.compile('.*user[ -]agent.*', re.IGNORECASE)\n\n\n# attach callback to the post search hook\n# request: flask request object\n# ctx: the whole local context of the pre search hook\ndef post_search(request, ctx):\n if ctx['search'].query == 'ip':\n x_forwarded_for = request.headers.getlist(\"X-Forwarded-For\")\n if x_forwarded_for:\n ip = x_forwarded_for[0]\n else:\n ip = request.remote_addr\n ctx['search'].result_container.answers.clear()\n ctx['search'].result_container.answers.add(ip)\n elif p.match(ctx['search'].query):\n ua = request.user_agent\n ctx['search'].result_container.answers.clear()\n ctx['search'].result_container.answers.add(ua)\n return True\n",
"path": "searx/plugins/self_info.py"
}
] | [
{
"content": "'''\nsearx is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nsearx is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with searx. If not, see < http://www.gnu.org/licenses/ >.\n\n(C) 2015 by Adam Tauber, <[email protected]>\n'''\nfrom flask_babel import gettext\nimport re\nname = \"Self Informations\"\ndescription = gettext('Displays your IP if the query is \"ip\" and your user agent if the query contains \"user agent\".')\ndefault_on = True\n\n\n# Self User Agent regex\np = re.compile('.*user[ -]agent.*', re.IGNORECASE)\n\n\n# attach callback to the post search hook\n# request: flask request object\n# ctx: the whole local context of the pre search hook\ndef post_search(request, ctx):\n if ctx['search'].pageno > 1:\n return True\n if ctx['search'].query == 'ip':\n x_forwarded_for = request.headers.getlist(\"X-Forwarded-For\")\n if x_forwarded_for:\n ip = x_forwarded_for[0]\n else:\n ip = request.remote_addr\n ctx['search'].result_container.answers.clear()\n ctx['search'].result_container.answers.add(ip)\n elif p.match(ctx['search'].query):\n ua = request.user_agent\n ctx['search'].result_container.answers.clear()\n ctx['search'].result_container.answers.add(ua)\n return True\n",
"path": "searx/plugins/self_info.py"
}
] | diff --git a/searx/plugins/self_info.py b/searx/plugins/self_info.py
index 75cbae0dea..438274c410 100644
--- a/searx/plugins/self_info.py
+++ b/searx/plugins/self_info.py
@@ -29,6 +29,8 @@
# request: flask request object
# ctx: the whole local context of the pre search hook
def post_search(request, ctx):
+ if ctx['search'].pageno > 1:
+ return True
if ctx['search'].query == 'ip':
x_forwarded_for = request.headers.getlist("X-Forwarded-For")
if x_forwarded_for:
diff --git a/tests/unit/test_plugins.py b/tests/unit/test_plugins.py
index 98d39ec14d..b8a8980cfc 100644
--- a/tests/unit/test_plugins.py
+++ b/tests/unit/test_plugins.py
@@ -52,23 +52,39 @@ def test_PluginStore_init(self):
request = Mock(user_plugins=store.plugins,
remote_addr='127.0.0.1')
request.headers.getlist.return_value = []
- ctx = get_search_mock(query='ip')
+ ctx = get_search_mock(query='ip', pageno=1)
store.call('post_search', request, ctx)
self.assertTrue('127.0.0.1' in ctx['search'].result_container.answers)
+ ctx = get_search_mock(query='ip', pageno=2)
+ store.call('post_search', request, ctx)
+ self.assertFalse('127.0.0.1' in ctx['search'].result_container.answers)
+
# User agent test
request = Mock(user_plugins=store.plugins,
user_agent='Mock')
request.headers.getlist.return_value = []
- ctx = get_search_mock(query='user-agent')
+ ctx = get_search_mock(query='user-agent', pageno=1)
store.call('post_search', request, ctx)
self.assertTrue('Mock' in ctx['search'].result_container.answers)
- ctx = get_search_mock(query='user-agent')
+ ctx = get_search_mock(query='user-agent', pageno=2)
+ store.call('post_search', request, ctx)
+ self.assertFalse('Mock' in ctx['search'].result_container.answers)
+
+ ctx = get_search_mock(query='user-agent', pageno=1)
store.call('post_search', request, ctx)
self.assertTrue('Mock' in ctx['search'].result_container.answers)
- ctx = get_search_mock(query='What is my User-Agent?')
+ ctx = get_search_mock(query='user-agent', pageno=2)
+ store.call('post_search', request, ctx)
+ self.assertFalse('Mock' in ctx['search'].result_container.answers)
+
+ ctx = get_search_mock(query='What is my User-Agent?', pageno=1)
store.call('post_search', request, ctx)
self.assertTrue('Mock' in ctx['search'].result_container.answers)
+
+ ctx = get_search_mock(query='What is my User-Agent?', pageno=2)
+ store.call('post_search', request, ctx)
+ self.assertFalse('Mock' in ctx['search'].result_container.answers)
| Infinite scroll: answers are repeated on each page
How to reproduce: search for ["user agent"](https://searx.me/?q=user+agent) with infinite scroll activated.
Should the answer be disabled on every page except the first, or should infinite scroll hide it?
I vote for the first option: disable answers except on the first page, on the server side.
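A minimal, framework-free sketch of that server-side option follows; the `FakeSearch` container and the hard-coded answer are stand-ins rather than searx's real plugin API. The hook simply returns early on any page after the first, so infinite scroll can keep fetching pages without the answer being added again.
```python
class FakeSearch:
    """Stand-in for the search context object a plugin receives."""

    def __init__(self, query, pageno):
        self.query = query
        self.pageno = pageno
        self.answers = set()


def post_search(request, ctx):
    search = ctx['search']
    if search.pageno > 1:  # pages beyond the first come from infinite scroll
        return True
    if search.query == 'ip':
        search.answers.add('127.0.0.1')  # placeholder for the real client IP lookup
    return True


ctx = {'search': FakeSearch('ip', pageno=2)}
post_search(request=None, ctx=ctx)
assert not ctx['search'].answers  # nothing is added past page 1
```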
|
fedora-infra__bodhi-4148 | [
{
"content": "# Copyright © 2019 Red Hat, Inc. and others.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nThe Bodhi handler that creates updates automatically from tagged builds.\n\nThis module is responsible for the process of creating updates when builds are\ntagged with certain tags.\n\"\"\"\n\nimport logging\nimport re\n\nimport fedora_messaging\n\nfrom bodhi.server import buildsys\nfrom bodhi.server.config import config\nfrom bodhi.server.models import (\n Bug, Build, ContentType, Package, Release, Update, UpdateStatus, UpdateType, User)\nfrom bodhi.server.tasks import work_on_bugs_task\nfrom bodhi.server.util import transactional_session_maker\n\nlog = logging.getLogger('bodhi')\n\n\nclass AutomaticUpdateHandler:\n \"\"\"\n The Bodhi Automatic Update Handler.\n\n A consumer that listens for messages about tagged builds and creates\n updates from them.\n \"\"\"\n\n def __init__(self, db_factory: transactional_session_maker = None):\n \"\"\"\n Initialize the Automatic Update Handler.\n\n Args:\n db_factory: If given, used as the db_factory for this handler. If\n None (the default), a new TransactionalSessionMaker is created and\n used.\n \"\"\"\n if not db_factory:\n self.db_factory = transactional_session_maker()\n else:\n self.db_factory = db_factory\n\n def __call__(self, message: fedora_messaging.api.Message) -> None:\n \"\"\"Create updates from appropriately tagged builds.\n\n Args:\n message: The message we are processing.\n \"\"\"\n body = message.body\n\n missing = []\n for mandatory in ('tag', 'build_id', 'name', 'version', 'release'):\n if mandatory not in body:\n missing.append(mandatory)\n if missing:\n log.debug(f\"Received incomplete tag message. Missing: {', '.join(missing)}\")\n return\n\n btag = body['tag']\n bnvr = '{name}-{version}-{release}'.format(**body)\n\n koji = buildsys.get_session()\n\n kbuildinfo = koji.getBuild(bnvr)\n if not kbuildinfo:\n log.debug(f\"Can't find Koji build for {bnvr}.\")\n return\n\n if 'nvr' not in kbuildinfo:\n log.debug(f\"Koji build info for {bnvr} doesn't contain 'nvr'.\")\n return\n\n if 'owner_name' not in kbuildinfo:\n log.debug(f\"Koji build info for {bnvr} doesn't contain 'owner_name'.\")\n return\n\n if kbuildinfo['owner_name'] in config.get('automatic_updates_blacklist'):\n log.debug(f\"{bnvr} owned by {kbuildinfo['owner_name']} who is listed in \"\n \"automatic_updates_blacklist, skipping.\")\n return\n\n # some APIs want the Koji build info, some others want the same\n # wrapped in a larger (request?) 
structure\n rbuildinfo = {\n 'info': kbuildinfo,\n 'nvr': kbuildinfo['nvr'].rsplit('-', 2),\n }\n\n with self.db_factory() as dbsession:\n rel = dbsession.query(Release).filter_by(create_automatic_updates=True,\n candidate_tag=btag).first()\n if not rel:\n log.debug(f\"Ignoring build being tagged into {btag!r}, no release configured for \"\n \"automatic updates for it found.\")\n return\n\n bcls = ContentType.infer_content_class(Build, kbuildinfo)\n build = bcls.get(bnvr)\n if build and build.update:\n log.info(f\"Build, active update for {bnvr} exists already, skipping.\")\n return\n\n if not build:\n log.debug(f\"Build for {bnvr} doesn't exist yet, creating.\")\n\n # Package.get_or_create() infers content type already\n log.debug(\"Getting/creating related package object.\")\n pkg = Package.get_or_create(dbsession, rbuildinfo)\n\n log.debug(\"Creating build object, adding it to the DB.\")\n build = bcls(nvr=bnvr, package=pkg, release=rel)\n dbsession.add(build)\n\n owner_name = kbuildinfo['owner_name']\n user = User.get(owner_name)\n if not user:\n log.debug(f\"Creating bodhi user for '{owner_name}'.\")\n # Leave email, groups blank, these will be filled\n # in or updated when they log into Bodhi next time, see\n # bodhi.server.security:remember_me().\n user = User(name=owner_name)\n dbsession.add(user)\n\n log.debug(f\"Creating new update for {bnvr}.\")\n changelog = build.get_changelog(lastupdate=True)\n closing_bugs = []\n if changelog:\n log.debug(\"Adding changelog to update notes.\")\n notes = f\"\"\"Automatic update for {bnvr}.\n\n##### **Changelog**\n\n```\n{changelog}\n```\"\"\"\n\n for b in re.finditer(config.get('bz_regex'), changelog, re.IGNORECASE):\n idx = int(b.group(1))\n log.debug(f'Adding bug #{idx} to the update.')\n bug = Bug.get(idx)\n if bug is None:\n bug = Bug(bug_id=idx)\n dbsession.add(bug)\n dbsession.flush()\n if bug not in closing_bugs:\n closing_bugs.append(bug)\n else:\n notes = f\"Automatic update for {bnvr}.\"\n update = Update(\n release=rel,\n builds=[build],\n bugs=closing_bugs,\n notes=notes,\n type=UpdateType.unspecified,\n stable_karma=3,\n unstable_karma=-3,\n autokarma=False,\n user=user,\n status=UpdateStatus.pending,\n )\n\n # Comment on the update that it was automatically created.\n update.comment(\n dbsession,\n str(\"This update was automatically created\"),\n author=\"bodhi\",\n )\n\n update.add_tag(update.release.pending_signing_tag)\n\n log.debug(\"Adding new update to the database.\")\n dbsession.add(update)\n\n log.debug(\"Flushing changes to the database.\")\n dbsession.flush()\n\n # Obsolete older updates which may be stuck in testing due to failed gating\n try:\n update.obsolete_older_updates(dbsession)\n except Exception as e:\n log.error(f'Problem obsoleting older updates: {e}')\n\n # This must be run after dbsession is closed so changes are committed to db\n alias = update.alias\n work_on_bugs_task.delay(alias, closing_bugs)\n",
"path": "bodhi/server/consumers/automatic_updates.py"
}
] | [
{
"content": "# Copyright © 2019 Red Hat, Inc. and others.\n#\n# This file is part of Bodhi.\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc., 51\n# Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\"\"\"\nThe Bodhi handler that creates updates automatically from tagged builds.\n\nThis module is responsible for the process of creating updates when builds are\ntagged with certain tags.\n\"\"\"\n\nimport logging\nimport re\n\nimport fedora_messaging\n\nfrom bodhi.server import buildsys\nfrom bodhi.server.config import config\nfrom bodhi.server.models import (\n Bug, Build, ContentType, Package, Release, Update, UpdateStatus, UpdateType, User)\nfrom bodhi.server.tasks import work_on_bugs_task\nfrom bodhi.server.util import transactional_session_maker\n\nlog = logging.getLogger('bodhi')\n\n\nclass AutomaticUpdateHandler:\n \"\"\"\n The Bodhi Automatic Update Handler.\n\n A consumer that listens for messages about tagged builds and creates\n updates from them.\n \"\"\"\n\n def __init__(self, db_factory: transactional_session_maker = None):\n \"\"\"\n Initialize the Automatic Update Handler.\n\n Args:\n db_factory: If given, used as the db_factory for this handler. If\n None (the default), a new TransactionalSessionMaker is created and\n used.\n \"\"\"\n if not db_factory:\n self.db_factory = transactional_session_maker()\n else:\n self.db_factory = db_factory\n\n def __call__(self, message: fedora_messaging.api.Message) -> None:\n \"\"\"Create updates from appropriately tagged builds.\n\n Args:\n message: The message we are processing.\n \"\"\"\n body = message.body\n\n missing = []\n for mandatory in ('tag', 'build_id', 'name', 'version', 'release'):\n if mandatory not in body:\n missing.append(mandatory)\n if missing:\n log.debug(f\"Received incomplete tag message. Missing: {', '.join(missing)}\")\n return\n\n btag = body['tag']\n bnvr = '{name}-{version}-{release}'.format(**body)\n\n koji = buildsys.get_session()\n\n kbuildinfo = koji.getBuild(bnvr)\n if not kbuildinfo:\n log.debug(f\"Can't find Koji build for {bnvr}.\")\n return\n\n if 'nvr' not in kbuildinfo:\n log.debug(f\"Koji build info for {bnvr} doesn't contain 'nvr'.\")\n return\n\n if 'owner_name' not in kbuildinfo:\n log.debug(f\"Koji build info for {bnvr} doesn't contain 'owner_name'.\")\n return\n\n if kbuildinfo['owner_name'] in config.get('automatic_updates_blacklist'):\n log.debug(f\"{bnvr} owned by {kbuildinfo['owner_name']} who is listed in \"\n \"automatic_updates_blacklist, skipping.\")\n return\n\n # some APIs want the Koji build info, some others want the same\n # wrapped in a larger (request?) 
structure\n rbuildinfo = {\n 'info': kbuildinfo,\n 'nvr': kbuildinfo['nvr'].rsplit('-', 2),\n }\n\n with self.db_factory() as dbsession:\n rel = dbsession.query(Release).filter_by(create_automatic_updates=True,\n candidate_tag=btag).first()\n if not rel:\n log.debug(f\"Ignoring build being tagged into {btag!r}, no release configured for \"\n \"automatic updates for it found.\")\n return\n\n bcls = ContentType.infer_content_class(Build, kbuildinfo)\n build = bcls.get(bnvr)\n if build and build.update:\n log.info(f\"Build, active update for {bnvr} exists already, skipping.\")\n return\n\n if not build:\n log.debug(f\"Build for {bnvr} doesn't exist yet, creating.\")\n\n # Package.get_or_create() infers content type already\n log.debug(\"Getting/creating related package object.\")\n pkg = Package.get_or_create(dbsession, rbuildinfo)\n\n log.debug(\"Creating build object, adding it to the DB.\")\n build = bcls(nvr=bnvr, package=pkg, release=rel)\n dbsession.add(build)\n\n owner_name = kbuildinfo['owner_name']\n user = User.get(owner_name)\n if not user:\n log.debug(f\"Creating bodhi user for '{owner_name}'.\")\n # Leave email, groups blank, these will be filled\n # in or updated when they log into Bodhi next time, see\n # bodhi.server.security:remember_me().\n user = User(name=owner_name)\n dbsession.add(user)\n\n log.debug(f\"Creating new update for {bnvr}.\")\n changelog = build.get_changelog(lastupdate=True)\n closing_bugs = []\n if changelog:\n log.debug(\"Adding changelog to update notes.\")\n notes = f\"\"\"Automatic update for {bnvr}.\n\n##### **Changelog**\n\n```\n{changelog}\n```\"\"\"\n\n for b in re.finditer(config.get('bz_regex'), changelog, re.IGNORECASE):\n idx = int(b.group(1))\n log.debug(f'Adding bug #{idx} to the update.')\n bug = Bug.get(idx)\n if bug is None:\n bug = Bug(bug_id=idx)\n dbsession.add(bug)\n dbsession.flush()\n if bug not in closing_bugs:\n closing_bugs.append(bug)\n else:\n notes = f\"Automatic update for {bnvr}.\"\n update = Update(\n release=rel,\n builds=[build],\n bugs=closing_bugs,\n notes=notes,\n type=UpdateType.unspecified,\n stable_karma=3,\n unstable_karma=-3,\n autokarma=False,\n user=user,\n status=UpdateStatus.pending,\n )\n\n # Comment on the update that it was automatically created.\n update.comment(\n dbsession,\n str(\"This update was automatically created\"),\n author=\"bodhi\",\n )\n\n update.add_tag(update.release.pending_signing_tag)\n\n log.debug(\"Adding new update to the database.\")\n dbsession.add(update)\n\n log.debug(\"Flushing changes to the database.\")\n dbsession.flush()\n\n # Obsolete older updates which may be stuck in testing due to failed gating\n try:\n update.obsolete_older_updates(dbsession)\n except Exception as e:\n log.error(f'Problem obsoleting older updates: {e}')\n\n alias = update.alias\n\n # This must be run after dbsession is closed so changes are committed to db\n work_on_bugs_task.delay(alias, closing_bugs)\n",
"path": "bodhi/server/consumers/automatic_updates.py"
}
] | diff --git a/bodhi/server/consumers/automatic_updates.py b/bodhi/server/consumers/automatic_updates.py
index d553fe6973..bd14056520 100644
--- a/bodhi/server/consumers/automatic_updates.py
+++ b/bodhi/server/consumers/automatic_updates.py
@@ -199,6 +199,7 @@ def __call__(self, message: fedora_messaging.api.Message) -> None:
except Exception as e:
log.error(f'Problem obsoleting older updates: {e}')
+ alias = update.alias
+
# This must be run after dbsession is closed so changes are committed to db
- alias = update.alias
work_on_bugs_task.delay(alias, closing_bugs)
diff --git a/news/4147.bug b/news/4147.bug
new file mode 100644
index 0000000000..e202f2617d
--- /dev/null
+++ b/news/4147.bug
@@ -0,0 +1 @@
+Fixed a crash in automatic_updates handler due to trying access update.alias after the session was closed
| Crash in automatic update handler when submitting work_on_bugs_task
From bodhi-consumer logs:
```
2020-10-25 11:17:14,460 INFO [fedora_messaging.twisted.protocol][MainThread] Consuming message from topic org.fedoraproject.prod.buildsys.tag (message id c2d97737-444f-49b4-b4ca-1efb3a05e941)
2020-10-25 11:17:14,463 INFO [bodhi][PoolThread-twisted.internet.reactor-1] Received message from fedora-messaging with topic: org.fedoraproject.prod.buildsys.tag
2020-10-25 11:17:14,463 INFO [bodhi][PoolThread-twisted.internet.reactor-1] ginac-1.7.9-5.fc34 tagged into f34-updates-candidate
2020-10-25 11:17:14,469 INFO [bodhi][PoolThread-twisted.internet.reactor-1] Build was not submitted, skipping
2020-10-25 11:17:14,838 INFO [bodhi.server][PoolThread-twisted.internet.reactor-1] Sending mail to [email protected]: [Fedora Update] [comment] ginac-1.7.9-5.fc34
2020-10-25 11:17:15,016 ERROR [bodhi][PoolThread-twisted.internet.reactor-1] Instance <Update at 0x7fa3740f5910> is not bound to a Session; attribute refresh operation cannot proceed (Background on this error at: http://sqlalche.me/e/13/bhk3): Unable to handle message in Automatic Update handler: Id: c2d97737-444f-49b4-b4ca-1efb3a05e941
Topic: org.fedoraproject.prod.buildsys.tag
Headers: {
"fedora_messaging_schema": "base.message",
"fedora_messaging_severity": 20,
"sent-at": "2020-10-25T11:17:14+00:00"
}
Body: {
"build_id": 1634116,
"instance": "primary",
"name": "ginac",
"owner": "---",
"release": "5.fc34",
"tag": "f34-updates-candidate",
"tag_id": 27040,
"user": "---",
"version": "1.7.9"
}
Traceback (most recent call last):
File "/usr/local/lib/python3.8/site-packages/bodhi/server/consumers/__init__.py", line 79, in __call__
handler_info.handler(msg)
File "/usr/local/lib/python3.8/site-packages/bodhi/server/consumers/automatic_updates.py", line 197, in __call__
alias = update.alias
File "/usr/lib64/python3.8/site-packages/sqlalchemy/orm/attributes.py", line 287, in __get__
return self.impl.get(instance_state(instance), dict_)
File "/usr/lib64/python3.8/site-packages/sqlalchemy/orm/attributes.py", line 718, in get
value = state._load_expired(state, passive)
File "/usr/lib64/python3.8/site-packages/sqlalchemy/orm/state.py", line 652, in _load_expired
self.manager.deferred_scalar_loader(self, toload)
File "/usr/lib64/python3.8/site-packages/sqlalchemy/orm/loading.py", line 944, in load_scalar_attributes
raise orm_exc.DetachedInstanceError(
sqlalchemy.orm.exc.DetachedInstanceError: Instance <Update at 0x7fa3740f5910> is not bound to a Session; attribute refresh operation cannot proceed (Background on this error at: http://sqlalche.me/e/13/bhk3 )
2020-10-25 11:17:15,053 WARNI [fedora_messaging.twisted.protocol][MainThread] Returning message id c2d97737-444f-49b4-b4ca-1efb3a05e941 to the queue
```
|
falconry__falcon-602 | [
{
"content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nHTTP_METHODS = (\n 'CONNECT',\n 'DELETE',\n 'GET',\n 'HEAD',\n 'OPTIONS',\n 'PATCH',\n 'POST',\n 'PUT',\n 'TRACE',\n)\n\nDEFAULT_MEDIA_TYPE = 'application/json; charset=utf-8'\n\n\n# Hoist classes and functions into the falcon namespace\nfrom falcon.version import __version__ # NOQA\nfrom falcon.api import API, DEFAULT_MEDIA_TYPE # NOQA\nfrom falcon.status_codes import * # NOQA\nfrom falcon.errors import * # NOQA\nfrom falcon.redirects import * # NOQA\nfrom falcon.http_error import HTTPError # NOQA\nfrom falcon.util import * # NOQA\nfrom falcon.hooks import before, after # NOQA\nfrom falcon.request import Request, RequestOptions # NOQA\nfrom falcon.response import Response # NOQA\n",
"path": "falcon/__init__.py"
}
] | [
{
"content": "# Copyright 2013 by Rackspace Hosting, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nHTTP_METHODS = (\n 'CONNECT',\n 'DELETE',\n 'GET',\n 'HEAD',\n 'OPTIONS',\n 'PATCH',\n 'POST',\n 'PUT',\n 'TRACE',\n)\n\nDEFAULT_MEDIA_TYPE = 'application/json; charset=utf-8'\n\n\n# Hoist classes and functions into the falcon namespace\nfrom falcon.version import __version__ # NOQA\nfrom falcon.api import API, DEFAULT_MEDIA_TYPE # NOQA\nfrom falcon.status_codes import * # NOQA\nfrom falcon.errors import * # NOQA\nfrom falcon.redirects import * # NOQA\nfrom falcon.http_error import HTTPError # NOQA\nfrom falcon.http_status import HTTPStatus # NOQA\nfrom falcon.util import * # NOQA\nfrom falcon.hooks import before, after # NOQA\nfrom falcon.request import Request, RequestOptions # NOQA\nfrom falcon.response import Response # NOQA\n",
"path": "falcon/__init__.py"
}
] | diff --git a/falcon/__init__.py b/falcon/__init__.py
index 92ecb20dc..c0214b849 100644
--- a/falcon/__init__.py
+++ b/falcon/__init__.py
@@ -34,6 +34,7 @@
from falcon.errors import * # NOQA
from falcon.redirects import * # NOQA
from falcon.http_error import HTTPError # NOQA
+from falcon.http_status import HTTPStatus # NOQA
from falcon.util import * # NOQA
from falcon.hooks import before, after # NOQA
from falcon.request import Request, RequestOptions # NOQA
| Hoist HTTPStatus into falcon top-level namespace
I.e., add an import line to `falcon/__init__.py`
|
litestar-org__litestar-2244 | [
{
"content": "from __future__ import annotations\n\nimport sys\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING\n\nfrom ._utils import RICH_CLICK_INSTALLED, LitestarEnv, LitestarExtensionGroup\nfrom .commands import core, schema, sessions\n\nif TYPE_CHECKING or not RICH_CLICK_INSTALLED: # pragma: no cover\n import click\n from click import Context, group, option, pass_context\n from click import Path as ClickPath\nelse:\n import rich_click as click\n from rich_click import Context, group, option, pass_context\n from rich_click import Path as ClickPath\n from rich_click.cli import patch as rich_click_patch\n\n rich_click_patch()\n click.rich_click.USE_RICH_MARKUP = True\n click.rich_click.USE_MARKDOWN = False\n click.rich_click.SHOW_ARGUMENTS = True\n click.rich_click.GROUP_ARGUMENTS_OPTIONS = True\n click.rich_click.SHOW_ARGUMENTS = True\n click.rich_click.GROUP_ARGUMENTS_OPTIONS = True\n click.rich_click.STYLE_ERRORS_SUGGESTION = \"magenta italic\"\n click.rich_click.ERRORS_SUGGESTION = \"\"\n click.rich_click.ERRORS_EPILOGUE = \"\"\n click.rich_click.MAX_WIDTH = 100\n click.rich_click.SHOW_METAVARS_COLUMN = True\n click.rich_click.APPEND_METAVARS_HELP = True\n\n\n__all__ = (\"litestar_group\",)\n\n\n@group(cls=LitestarExtensionGroup, context_settings={\"help_option_names\": [\"-h\", \"--help\"]})\n@option(\"--app\", \"app_path\", help=\"Module path to a Litestar application\")\n@option(\n \"--app-dir\",\n help=\"Look for APP in the specified directory, by adding this to the PYTHONPATH. Defaults to the current working directory.\",\n default=None,\n type=ClickPath(dir_okay=True, file_okay=False, path_type=Path),\n show_default=False,\n)\n@pass_context\ndef litestar_group(ctx: Context, app_path: str | None, app_dir: Path | None = None) -> None:\n \"\"\"Litestar CLI.\"\"\"\n sys.path.append(str(app_dir))\n\n if ctx.obj is None: # env has not been loaded yet, so we can lazy load it\n ctx.obj = lambda: LitestarEnv.from_env(app_path)\n\n\n# add sub commands here\n\nlitestar_group.add_command(core.info_command)\nlitestar_group.add_command(core.run_command)\nlitestar_group.add_command(core.routes_command)\nlitestar_group.add_command(core.version_command)\nlitestar_group.add_command(sessions.sessions_group)\nlitestar_group.add_command(schema.schema_group)\n",
"path": "litestar/cli/main.py"
}
] | [
{
"content": "from __future__ import annotations\n\nimport sys\nfrom pathlib import Path\nfrom typing import TYPE_CHECKING\n\nfrom ._utils import RICH_CLICK_INSTALLED, LitestarEnv, LitestarExtensionGroup\nfrom .commands import core, schema, sessions\n\nif TYPE_CHECKING or not RICH_CLICK_INSTALLED: # pragma: no cover\n import click\n from click import Context, group, option, pass_context\n from click import Path as ClickPath\nelse:\n import rich_click as click\n from rich_click import Context, group, option, pass_context\n from rich_click import Path as ClickPath\n from rich_click.cli import patch as rich_click_patch\n\n rich_click_patch()\n click.rich_click.USE_RICH_MARKUP = True\n click.rich_click.USE_MARKDOWN = False\n click.rich_click.SHOW_ARGUMENTS = True\n click.rich_click.GROUP_ARGUMENTS_OPTIONS = True\n click.rich_click.SHOW_ARGUMENTS = True\n click.rich_click.GROUP_ARGUMENTS_OPTIONS = True\n click.rich_click.STYLE_ERRORS_SUGGESTION = \"magenta italic\"\n click.rich_click.ERRORS_SUGGESTION = \"\"\n click.rich_click.ERRORS_EPILOGUE = \"\"\n click.rich_click.MAX_WIDTH = 80\n click.rich_click.SHOW_METAVARS_COLUMN = True\n click.rich_click.APPEND_METAVARS_HELP = True\n\n\n__all__ = (\"litestar_group\",)\n\n\n@group(cls=LitestarExtensionGroup, context_settings={\"help_option_names\": [\"-h\", \"--help\"]})\n@option(\"--app\", \"app_path\", help=\"Module path to a Litestar application\")\n@option(\n \"--app-dir\",\n help=\"Look for APP in the specified directory, by adding this to the PYTHONPATH. Defaults to the current working directory.\",\n default=None,\n type=ClickPath(dir_okay=True, file_okay=False, path_type=Path),\n show_default=False,\n)\n@pass_context\ndef litestar_group(ctx: Context, app_path: str | None, app_dir: Path | None = None) -> None:\n \"\"\"Litestar CLI.\"\"\"\n sys.path.append(str(app_dir))\n\n if ctx.obj is None: # env has not been loaded yet, so we can lazy load it\n ctx.obj = lambda: LitestarEnv.from_env(app_path)\n\n\n# add sub commands here\n\nlitestar_group.add_command(core.info_command)\nlitestar_group.add_command(core.run_command)\nlitestar_group.add_command(core.routes_command)\nlitestar_group.add_command(core.version_command)\nlitestar_group.add_command(sessions.sessions_group)\nlitestar_group.add_command(schema.schema_group)\n",
"path": "litestar/cli/main.py"
}
] | diff --git a/litestar/cli/main.py b/litestar/cli/main.py
index 4b43ecc888..0fd802e476 100644
--- a/litestar/cli/main.py
+++ b/litestar/cli/main.py
@@ -27,7 +27,7 @@
click.rich_click.STYLE_ERRORS_SUGGESTION = "magenta italic"
click.rich_click.ERRORS_SUGGESTION = ""
click.rich_click.ERRORS_EPILOGUE = ""
- click.rich_click.MAX_WIDTH = 100
+ click.rich_click.MAX_WIDTH = 80
click.rich_click.SHOW_METAVARS_COLUMN = True
click.rich_click.APPEND_METAVARS_HELP = True
| StaticFilesConfig and virtual directories
I'm trying to write a ``FileSystemProtocol`` to load files from the package data using [importlib_resources](https://importlib-resources.readthedocs.io/en/latest/using.html#). But because ``directories`` is defined as ``DirectoryPath``, pydantic checks if the given directories exist in the local filesystem.
This is not generally true, especially in any kind of virtual filesystem (e.g. a zipped package). I think this condition should be relaxed to support virtual filesystems.
https://github.com/starlite-api/starlite/blob/9bb6dcd57c10a591377cf8e3a537e9292566d5b9/starlite/config/static_files.py#L32
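For context, a minimal sketch of why the `DirectoryPath` constraint gets in the way; the config class and path string here are placeholders, not Starlite's real `StaticFilesConfig`. The validator requires the directory to exist on the local filesystem, so a resource that only exists inside a package or zip archive can never pass validation, even though a custom `FileSystemProtocol` could serve it.
```python
from typing import List

from pydantic import BaseModel, DirectoryPath, ValidationError


class StaticFilesConfigSketch(BaseModel):  # placeholder, not the real config class
    directories: List[DirectoryPath]


try:
    # A path backed by importlib resources inside a zipped package need not exist
    # on disk, yet DirectoryPath insists that it does.
    StaticFilesConfigSketch(directories=["assets-inside-a-zipped-package"])
except ValidationError as exc:
    print(exc)  # reports that the directory does not exist
```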
|